--- image.c	2002/11/27 14:29:34	1.20.2.8
+++ image.c	2002/12/17 12:56:37	1.20.2.12
@@ -59,6 +59,7 @@
 #include "image.h"
 #include "colorspace.h"
 #include "interpolate8x8.h"
+#include "reduced.h"
 #include "../divx4.h"
 #include "../utils/mem_align.h"
 
@@ -475,6 +476,48 @@
 }
 
 
+/*
+chroma optimize filter, invented by mf
+a chroma pixel is set to the average of its neighbouring chroma pixels when
+the corresponding luma pixels are all pure black or pure white.
+*/
+
+void
+image_chroma_optimize(IMAGE * img, int width, int height, int edged_width)
+{
+	int x,y;
+	int pixels = 0;
+
+	for (y = 1; y < height/2 - 1; y++)
+	for (x = 1; x < width/2 - 1; x++)
+	{
+#define IS_PURE(a)  ((a)<=16||(a)>=235)
+#define IMG_Y(Y,X)	img->y[(Y)*edged_width + (X)]
+#define IMG_U(Y,X)	img->u[(Y)*edged_width/2 + (X)]
+#define IMG_V(Y,X)	img->v[(Y)*edged_width/2 + (X)]
+
+		if (IS_PURE(IMG_Y(y*2  ,x*2  )) &&
+			IS_PURE(IMG_Y(y*2  ,x*2+1)) &&
+			IS_PURE(IMG_Y(y*2+1,x*2  )) &&
+			IS_PURE(IMG_Y(y*2+1,x*2+1)))
+		{
+			IMG_U(y,x) = (IMG_U(y,x-1) + IMG_U(y-1, x) + IMG_U(y, x+1) + IMG_U(y+1, x)) / 4;
+			IMG_V(y,x) = (IMG_V(y,x-1) + IMG_V(y-1, x) + IMG_V(y, x+1) + IMG_V(y+1, x)) / 4;
+			pixels++;
+		}
+
+#undef IS_PURE
+#undef IMG_Y
+#undef IMG_U
+#undef IMG_V
+	}
+
+	DPRINTF(DPRINTF_DEBUG,"chroma_optimized_pixels = %i/%i", pixels, width*height/4);
+}
+
+
+
+
 /*
   perform safe packed colorspace conversion, by splitting
@@ -634,14 +677,14 @@
 	case XVID_CSP_I420:
 		yv12_to_yv12(image->y, image->u, image->v, edged_width, edged_width2,
-			src, src + width*height, src + width*height + width2*height2,
-			width, width2, width, height, (csp & XVID_CSP_VFLIP));
+			src, src + src_stride*height, src + src_stride*height + (src_stride/2)*height2,
+			src_stride, src_stride/2, width, height, (csp & XVID_CSP_VFLIP));
 		break ;
 
 	case XVID_CSP_YV12:		/* u/v swapped */
 		yv12_to_yv12(image->y, image->v, image->u, edged_width, edged_width2,
-			src, src + width*height, src + width*height + width2*height2,
-			width, width2, width, height, (csp & XVID_CSP_VFLIP));
+			src, src + src_stride*height, src + src_stride*height + (src_stride/2)*height2,
+			src_stride, src_stride/2, width, height, (csp & XVID_CSP_VFLIP));
 		break;
 
 	case XVID_CSP_USER:
@@ -722,7 +765,6 @@
 				  int interlacing)
 {
 	const int edged_width2 = edged_width/2;
-	int width2 = width/2;
 	int height2 = height/2;
 
 /*
@@ -831,15 +873,15 @@
 		return 0;
 
 	case XVID_CSP_I420:
-		yv12_to_yv12(dst, dst + width*height, dst + width*height + width2*height2,
-			width, width2,
+		yv12_to_yv12(dst, dst + dst_stride*height, dst + dst_stride*height + (dst_stride/2)*height2,
+			dst_stride, dst_stride/2,
 			image->y, image->u, image->v, edged_width, edged_width2,
 			width, height, (csp & XVID_CSP_VFLIP));
 		return 0;
 
 	case XVID_CSP_YV12:		// u,v swapped
-		yv12_to_yv12(dst, dst + width*height, dst + width*height + width2*height2,
-			width, width2,
+		yv12_to_yv12(dst, dst + dst_stride*height, dst + dst_stride*height + (dst_stride/2)*height2,
+			dst_stride, dst_stride/2,
 			image->y, image->v, image->u, edged_width, edged_width2,
 			width, height, (csp & XVID_CSP_VFLIP));
 		return 0;
@@ -1055,3 +1097,106 @@
 		sV += std2;
 	}
 }
+
+
+void
+image_clear(IMAGE * img, int width, int height, int edged_width,
+					int y, int u, int v)
+{
+	uint8_t * p;
+	int i;
+
+	p = img->y;
+	for (i = 0; i < height; i++) {
+		memset(p, y, width);
+		p += edged_width;
+	}
+
+	p = img->u;
+	for (i = 0; i < height/2; i++) {
+		memset(p, u, width/2);
+		p += edged_width/2;
+	}
+
+	p = img->v;
+	for (i = 0; i < height/2; i++) {
+		memset(p, v, width/2);
+		p += edged_width/2;
+	}
+}
+
+
+/* reduced resolution deblocking filter
+	block = block size (16=rrv, 8=full resolution)
+	flags = XVID_DEC_DEBLOCKY|XVID_DEC_DEBLOCKUV
+*/
+void
+image_deblock_rrv(IMAGE * img, int edged_width,
+				const MACROBLOCK * mbs, int mb_width, int mb_height, int mb_stride,
+				int block, int flags)
+{
+	const int edged_width2 = edged_width /2;
+	const int nblocks = block / 8;	/* skal's code uses 8-pixel block units */
+	int i,j;
+
+	/* luma: j,i in block units */
+	if ((flags & XVID_DEC_DEBLOCKY))
+	{
+		for (j = 1; j < mb_height*2; j++)		/* horizontal deblocking */
+		for (i = 0; i < mb_width*2; i++)
+		{
+			if (mbs[(j-1)/2*mb_stride + (i/2)].mode != MODE_NOT_CODED ||
+				mbs[(j+0)/2*mb_stride + (i/2)].mode != MODE_NOT_CODED)
+			{
+				xvid_HFilter_31_C(img->y + (j*block - 1)*edged_width + i*block,
+								  img->y + (j*block + 0)*edged_width + i*block, nblocks);
+			}
+		}
+
+		for (j = 0; j < mb_height*2; j++)		/* vertical deblocking */
+		for (i = 1; i < mb_width*2; i++)
+		{
+			if (mbs[(j/2)*mb_stride + (i-1)/2].mode != MODE_NOT_CODED ||
+				mbs[(j/2)*mb_stride + (i+0)/2].mode != MODE_NOT_CODED)
+			{
+				vfilter_31(img->y + (j*block)*edged_width + i*block - 1,
+						   img->y + (j*block)*edged_width + i*block + 0,
+						   edged_width, nblocks);
+			}
+		}
+	}
+
+
+	/* chroma */
+	if ((flags & XVID_DEC_DEBLOCKUV))
+	{
+		for (j = 0; j < mb_height; j++)		/* vertical deblocking */
+		for (i = 1; i < mb_width; i++)
+		{
+			if (mbs[j*mb_stride + i - 1].mode != MODE_NOT_CODED ||
+				mbs[j*mb_stride + i + 0].mode != MODE_NOT_CODED)
+			{
+				vfilter_31(img->u + (j*block)*edged_width2 + i*block - 1,
+						   img->u + (j*block)*edged_width2 + i*block + 0,
+						   edged_width2, nblocks);
+				vfilter_31(img->v + (j*block)*edged_width2 + i*block - 1,
+						   img->v + (j*block)*edged_width2 + i*block + 0,
+						   edged_width2, nblocks);
+			}
+		}
+
+		for (j = 1; j < mb_height; j++)		/* horizontal deblocking */
+		for (i = 0; i < mb_width; i++)
+		{
+			if (mbs[(j-1)*mb_stride + i].mode != MODE_NOT_CODED ||
+				mbs[(j+0)*mb_stride + i].mode != MODE_NOT_CODED)
+			{
+				hfilter_31(img->u + (j*block - 1)*edged_width2 + i*block,
+						   img->u + (j*block + 0)*edged_width2 + i*block, nblocks);
+				hfilter_31(img->v + (j*block - 1)*edged_width2 + i*block,
+						   img->v + (j*block + 0)*edged_width2 + i*block, nblocks);
+			}
+		}
+	}
+
+}
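
For context, the sketch below shows one way the new helpers could be combined as a post-processing pass on a reconstructed frame. It is illustrative only and not part of the patch: the wrapper name, its parameter list and the call site are assumptions; only the image_* functions, the IMAGE/MACROBLOCK types and the XVID_DEC_DEBLOCK* flags come from the diff above.

/* Illustrative sketch only -- not part of this patch.  Everything except
 * the image_* calls, the IMAGE/MACROBLOCK types and the XVID_DEC_DEBLOCK*
 * flags is assumed for the example. */
static void
postprocess_frame_sketch(IMAGE * img, const MACROBLOCK * mbs,
						 int width, int height, int edged_width,
						 int mb_width, int mb_height, int mb_stride,
						 int reduced_resolution, int flags)
{
	/* average out chroma under pure black/white luma */
	image_chroma_optimize(img, width, height, edged_width);

	/* block size follows the comment above: 16 for reduced resolution
	   (rrv) frames, 8 for full resolution */
	image_deblock_rrv(img, edged_width, mbs,
					  mb_width, mb_height, mb_stride,
					  reduced_resolution ? 16 : 8,
					  flags & (XVID_DEC_DEBLOCKY | XVID_DEC_DEBLOCKUV));
}

image_clear() would be used independently of the above, e.g. image_clear(img, width, height, edged_width, 0, 128, 128) to blank a frame to black; the fill values here are illustrative.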