3 |
* XVID MPEG-4 VIDEO CODEC |
* XVID MPEG-4 VIDEO CODEC |
4 |
* - Image management functions - |
* - Image management functions - |
5 |
* |
* |
6 |
* Copyright(C) 2001-2004 Peter Ross <pross@xvid.org> |
* Copyright(C) 2001-2010 Peter Ross <pross@xvid.org> |
7 |
* |
* |
8 |
* This program is free software ; you can redistribute it and/or modify |
* This program is free software ; you can redistribute it and/or modify |
9 |
* it under the terms of the GNU General Public License as published by |
* it under the terms of the GNU General Public License as published by |
127 |
memcpy(image1->v, image2->v, edged_width * height / 4); |
memcpy(image1->v, image2->v, edged_width * height / 4); |
128 |
} |
} |
129 |
|
|
130 |
/* setedges bug was fixed in this BS version */ |
/* setedges bug was present in these BS versions */ |
131 |
#define SETEDGES_BUG_BEFORE 18 |
#define SETEDGES_BUG_BEFORE 18 |
132 |
|
#define SETEDGES_BUG_AFTER 57 |
133 |
|
#define SETEDGES_BUG_REFIXED 63 |
134 |
|
|
135 |
void |
void |
136 |
image_setedges(IMAGE * image, |
image_setedges(IMAGE * image, |
151 |
|
|
152 |
/* According to the Standard Clause 7.6.4, padding is done starting at 16 |
/* According to the Standard Clause 7.6.4, padding is done starting at 16 |
153 |
* pixel width and height multiples. This was not respected in old xvids */ |
* pixel width and height multiples. This was not respected in old xvids */ |
154 |
if (bs_version >= SETEDGES_BUG_BEFORE) { |
if ((bs_version >= SETEDGES_BUG_BEFORE && |
155 |
|
bs_version < SETEDGES_BUG_AFTER) || |
156 |
|
bs_version >= SETEDGES_BUG_REFIXED) { |
157 |
width = (width+15)&~15; |
width = (width+15)&~15; |
158 |
height = (height+15)&~15; |
height = (height+15)&~15; |
159 |
} |
} |
866 |
return (sse); |
return (sse); |
867 |
} |
} |
868 |
|
|
869 |
|
void image_block_variance(IMAGE * orig_image, |
870 |
|
uint16_t stride, |
871 |
|
MACROBLOCK *mbs, |
872 |
|
uint16_t mb_width, |
873 |
|
uint16_t mb_height) |
874 |
|
{ |
875 |
|
DECLARE_ALIGNED_MATRIX(sums, 1, 4, uint16_t, CACHE_LINE); |
876 |
|
DECLARE_ALIGNED_MATRIX(squares, 1, 4, uint32_t, CACHE_LINE); |
877 |
|
|
878 |
|
int x, y, i, j; |
879 |
|
uint8_t *orig_y = orig_image->y; |
880 |
|
uint8_t *orig_u = orig_image->u; |
881 |
|
uint8_t *orig_v = orig_image->v; |
882 |
|
|
883 |
|
for (y = 0; y < mb_height; y++) { |
884 |
|
for (x = 0; x < mb_width; x++) { |
885 |
|
MACROBLOCK *pMB = &mbs[x + y * mb_width]; |
886 |
|
uint32_t var4[4]; |
887 |
|
uint32_t sum = 0, square = 0; |
888 |
|
|
889 |
|
/* y-blocks */ |
890 |
|
for (j = 0; j < 2; j++) { |
891 |
|
for (i = 0; i < 2; i++) { |
892 |
|
int lsum = blocksum8(orig_y + ((y<<4) + (j<<3))*stride + (x<<4) + (i<<3), |
893 |
|
stride, sums, squares); |
894 |
|
int lsquare = (squares[0] + squares[1] + squares[2] + squares[3])<<6; |
895 |
|
|
896 |
|
sum += lsum; |
897 |
|
square += lsquare; |
898 |
|
|
899 |
|
var4[0] = (squares[0]<<4) - sums[0]*sums[0]; |
900 |
|
var4[1] = (squares[1]<<4) - sums[1]*sums[1]; |
901 |
|
var4[2] = (squares[2]<<4) - sums[2]*sums[2]; |
902 |
|
var4[3] = (squares[3]<<4) - sums[3]*sums[3]; |
903 |
|
|
904 |
|
pMB->rel_var8[j*2 + i] = lsquare - lsum*lsum; |
905 |
|
if (pMB->rel_var8[j*2 + i]) |
906 |
|
pMB->rel_var8[j*2 + i] = ((var4[0] + var4[1] + var4[2] + var4[3])<<8) / |
907 |
|
pMB->rel_var8[j*2 + i]; /* 4*(Var(Di)/Var(D)) */ |
908 |
|
else |
909 |
|
pMB->rel_var8[j*2 + i] = 64; |
910 |
|
} |
911 |
|
} |
912 |
|
|
913 |
|
/* u */ |
914 |
|
{ |
915 |
|
int lsum = blocksum8(orig_u + (y<<3)*(stride>>1) + (x<<3), |
916 |
|
stride, sums, squares); |
917 |
|
int lsquare = (squares[0] + squares[1] + squares[2] + squares[3])<<6; |
918 |
|
|
919 |
|
sum += lsum; |
920 |
|
square += lsquare; |
921 |
|
|
922 |
|
var4[0] = (squares[0]<<4) - sums[0]*sums[0]; |
923 |
|
var4[1] = (squares[1]<<4) - sums[1]*sums[1]; |
924 |
|
var4[2] = (squares[2]<<4) - sums[2]*sums[2]; |
925 |
|
var4[3] = (squares[3]<<4) - sums[3]*sums[3]; |
926 |
|
|
927 |
|
pMB->rel_var8[4] = lsquare - lsum*lsum; |
928 |
|
if (pMB->rel_var8[4]) |
929 |
|
pMB->rel_var8[4] = ((var4[0] + var4[1] + var4[2] + var4[3])<<8) / |
930 |
|
pMB->rel_var8[4]; /* 4*(Var(Di)/Var(D)) */ |
931 |
|
else |
932 |
|
pMB->rel_var8[4] = 64; |
933 |
|
} |
934 |
|
|
935 |
|
/* v */ |
936 |
|
{ |
937 |
|
int lsum = blocksum8(orig_v + (y<<3)*(stride>>1) + (x<<3), |
938 |
|
stride, sums, squares); |
939 |
|
int lsquare = (squares[0] + squares[1] + squares[2] + squares[3])<<6; |
940 |
|
|
941 |
|
sum += lsum; |
942 |
|
square += lsquare; |
943 |
|
|
944 |
|
var4[0] = (squares[0]<<4) - sums[0]*sums[0]; |
945 |
|
var4[1] = (squares[1]<<4) - sums[1]*sums[1]; |
946 |
|
var4[2] = (squares[2]<<4) - sums[2]*sums[2]; |
947 |
|
var4[3] = (squares[3]<<4) - sums[3]*sums[3]; |
948 |
|
|
949 |
|
pMB->rel_var8[5] = lsquare - lsum*lsum; |
950 |
|
if (pMB->rel_var8[5]) |
951 |
|
pMB->rel_var8[5] = ((var4[0] + var4[1] + var4[2] + var4[3])<<8) / |
952 |
|
pMB->rel_var8[5]; /* 4*(Var(Di)/Var(D)) */ |
953 |
|
else |
954 |
|
pMB->rel_var8[5] = 64; |
955 |
|
} |
956 |
|
|
957 |
|
} |
958 |
|
} |
959 |
|
} |
960 |
|
|
961 |
#if 0 |
#if 0 |
962 |
|
|
963 |
#include <stdio.h> |
#include <stdio.h> |