--- image.c	2003/10/01 23:23:01	1.26.2.10
+++ image.c	2004/01/30 18:53:50	1.26.2.14
@@ -19,7 +19,7 @@
  * along with this program ; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
- * $Id: image.c,v 1.26.2.10 2003/10/01 23:23:01 edgomez Exp $
+ * $Id: image.c,v 1.26.2.14 2004/01/30 18:53:50 chl Exp $
  *
  ****************************************************************************/
 
@@ -258,13 +258,13 @@
 	n_ptr = refn->y;
 	h_ptr = refh->y;
 	v_ptr = refv->y;
-	hv_ptr = refhv->y;
 
 	n_ptr -= offset;
 	h_ptr -= offset;
 	v_ptr -= offset;
-	hv_ptr -= offset;
+	/* Note we initialize the hv pointer later, as we can optimize code a bit
+	 * doing it down to up in quarterpel and up to down in halfpel */
 
 	if(quarterpel) {
 
 		for (y = 0; y < (edged_height - EDGE_SIZE); y += 8) {
@@ -286,24 +286,25 @@
 			n_ptr += stride_add;
 		}
 
-		h_ptr = refh->y;
-		h_ptr -= offset;
+		h_ptr = refh->y + (edged_height - EDGE_SIZE - EDGE_SIZE2)*edged_width - EDGE_SIZE2;
+		hv_ptr = refhv->y + (edged_height - EDGE_SIZE - EDGE_SIZE2)*edged_width - EDGE_SIZE2;
 
 		for (y = 0; y < (edged_height - EDGE_SIZE); y = y + 8) {
+			hv_ptr -= stride_add;
+			h_ptr -= stride_add;
+			hv_ptr -= EDGE_SIZE;
+			h_ptr -= EDGE_SIZE;
+
 			for (x = 0; x < (edged_width - EDGE_SIZE); x = x + 8) {
+				hv_ptr -= 8;
+				h_ptr -= 8;
 				interpolate8x8_6tap_lowpass_v(hv_ptr, h_ptr, edged_width, rounding);
-				hv_ptr += 8;
-				h_ptr += 8;
 			}
-
-			hv_ptr += EDGE_SIZE;
-			h_ptr += EDGE_SIZE;
-
-			hv_ptr += stride_add;
-			h_ptr += stride_add;
 		}
-	}
-	else {
+	} else {
+
+		hv_ptr = refhv->y;
+		hv_ptr -= offset;
 
 		for (y = 0; y < (edged_height - EDGE_SIZE); y += 8) {
 			for (x = 0; x < (edged_width - EDGE_SIZE); x += 8) {
@@ -597,6 +598,14 @@
 			interlacing?rgbai_to_yv12 :rgba_to_yv12,
 			interlacing?rgbai_to_yv12_c:rgba_to_yv12_c, 4);
 		break;
+
+	case XVID_CSP_ARGB:
+		safe_packed_conv(
+			src[0], src_stride[0], image->y, image->u, image->v,
+			edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP),
+			interlacing?argbi_to_yv12 : argb_to_yv12,
+			interlacing?argbi_to_yv12_c: argb_to_yv12_c, 4);
+		break;
 
 	case XVID_CSP_YUY2:
 		safe_packed_conv(
@@ -622,22 +631,21 @@
 			interlacing?uyvyi_to_yv12_c:uyvy_to_yv12_c, 2);
 		break;
 
-	case XVID_CSP_I420:
+	case XVID_CSP_YV12:	/* YCrCb == internal colorspace for MPEG */
 		yv12_to_yv12(image->y, image->u, image->v, edged_width, edged_width2,
 			src[0], src[0] + src_stride[0]*height, src[0] + src_stride[0]*height + (src_stride[0]/2)*height2,
 			src_stride[0], src_stride[0]/2, width, height, (csp & XVID_CSP_VFLIP));
-		break
-			;
-	case XVID_CSP_YV12:	/* u/v swapped */
+		break;
+
+	case XVID_CSP_I420:	/* YCbCr == U and V plane swapped */
 		yv12_to_yv12(image->y, image->v, image->u, edged_width, edged_width2,
 			src[0], src[0] + src_stride[0]*height, src[0] + src_stride[0]*height + (src_stride[0]/2)*height2,
 			src_stride[0], src_stride[0]/2, width, height, (csp & XVID_CSP_VFLIP));
 		break;
 
-	case XVID_CSP_USER:
-		/*XXX: support for different u & v strides */
+	case XVID_CSP_PLANAR:	/* YCbCr with arbitrary pointers and different strides for Y and UV */
 		yv12_to_yv12(image->y, image->u, image->v, edged_width, edged_width2,
-			src[0], src[1], src[2], src_stride[0], src_stride[1],
+			src[0], src[1], src[2], src_stride[0], src_stride[1],	/* v: dst_stride[2] not yet supported */
 			width, height, (csp & XVID_CSP_VFLIP));
 		break;
 
@@ -767,6 +775,14 @@
 			interlacing?yv12_to_rgbai_c:yv12_to_rgba_c, 4);
 		return 0;
 
+	case XVID_CSP_ARGB:
+		safe_packed_conv(
+			dst[0], dst_stride[0], image->y, image->u, image->v,
+			edged_width, edged_width2, width, height, (csp & XVID_CSP_VFLIP),
+			interlacing?yv12_to_argbi :yv12_to_argb,
+			interlacing?yv12_to_argbi_c:yv12_to_argb_c, 4);
+		return 0;
+
 	case XVID_CSP_YUY2:
 		safe_packed_conv(
 			dst[0], dst_stride[0], image->y, image->u, image->v,
@@ -791,24 +807,24 @@
 			interlacing?yv12_to_uyvyi_c:yv12_to_uyvy_c, 2);
 		return 0;
 
-	case XVID_CSP_I420:
+	case XVID_CSP_YV12:	/* YCbCr == internal colorspace for MPEG */
 		yv12_to_yv12(dst[0], dst[0] + dst_stride[0]*height, dst[0] + dst_stride[0]*height + (dst_stride[0]/2)*height2,
 			dst_stride[0], dst_stride[0]/2,
 			image->y, image->u, image->v, edged_width, edged_width2,
 			width, height, (csp & XVID_CSP_VFLIP));
 		return 0;
 
-	case XVID_CSP_YV12:	/* u,v swapped */
+	case XVID_CSP_I420:	/* YCrCb == U and V plane swapped */
 		yv12_to_yv12(dst[0], dst[0] + dst_stride[0]*height, dst[0] + dst_stride[0]*height + (dst_stride[0]/2)*height2,
			dst_stride[0], dst_stride[0]/2,
 			image->y, image->v, image->u, edged_width, edged_width2,
 			width, height, (csp & XVID_CSP_VFLIP));
 		return 0;
 
-	case XVID_CSP_USER :	/* u,v swapped */
+	case XVID_CSP_PLANAR:	/* YCbCr with arbitrary pointers and different strides for Y and UV */
 		yv12_to_yv12(dst[0], dst[1], dst[2],
-			dst_stride[0], dst_stride[1],	/* v: dst_stride[2] */
-			image->y, image->v, image->u, edged_width, edged_width2,
+			dst_stride[0], dst_stride[1],	/* v: dst_stride[2] not yet supported */
+			image->y, image->u, image->v, edged_width, edged_width2,
 			width, height, (csp & XVID_CSP_VFLIP));
 		return 0;