3 |
; * XVID MPEG-4 VIDEO CODEC |
; * XVID MPEG-4 VIDEO CODEC |
4 |
; * - RGB colorspace conversions - |
; * - RGB colorspace conversions - |
5 |
; * |
; * |
6 |
; * Copyright(C) 2002-2003 Michael Militzer <isibaar@xvid.org> |
; * Copyright(C) 2002-2008 Michael Militzer <michael@xvid.org> |
7 |
; * 2002-2003 Peter Ross <pross@xvid.org> |
; * 2002-2003 Peter Ross <pross@xvid.org> |
8 |
; * |
; * |
9 |
; * This program is free software ; you can redistribute it and/or modify |
; * This program is free software ; you can redistribute it and/or modify |
22 |
; * |
; * |
23 |
; ****************************************************************************/ |
; ****************************************************************************/ |
24 |
|
|
25 |
BITS 32 |
%include "nasm.inc" |
|
|
|
|
;-----------------------------------------------------------------------------
; cglobal NAME
;
; Declare NAME as an exported global symbol, adapting the emitted symbol to
; the build configuration:
;
;   PREFIX defined     -> export the symbol as _NAME and %define NAME to the
;                         underscored form so every later use of NAME (label
;                         definition and references) resolves to _NAME.
;                         (Presumably for object formats whose C ABI prepends
;                         an underscore, e.g. win32/Mach-O — confirm against
;                         the build system that sets PREFIX.)
;   MARK_FUNCS defined -> additionally tag the symbol with the ELF
;                         `:function` type and an explicit size of
;                         NAME.endfunc-NAME; this requires each function body
;                         to end with a local `.endfunc` label.
;
; NOTE(review): in the PREFIX+MARK_FUNCS branch the %define expands NAME to
; `_NAME:function NAME.endfunc-NAME`, so the `:function` annotation is
; re-emitted at the label-definition site as well — this mirrors the upstream
; xvid macro; verify against nasm.inc before changing.
%macro cglobal 1


%ifdef PREFIX


%ifdef MARK_FUNCS


; Exported, underscored, with ELF function type + size.
global _%1:function %1.endfunc-%1


; Redirect all later uses of NAME to the underscored (typed) symbol.
%define %1 _%1:function %1.endfunc-%1


%else


; Exported, underscored, untyped.
global _%1


; Redirect all later uses of NAME to the underscored symbol.
%define %1 _%1


%endif


%else


%ifdef MARK_FUNCS


; Exported under its plain name, with ELF function type + size.
global %1:function %1.endfunc-%1


%else


; Exported under its plain name; no %define needed.
global %1


%endif


%endif


%endmacro
|
26 |
|
|
27 |
;============================================================================= |
;============================================================================= |
28 |
; Some constants |
; Some constants |
54 |
; Read only data |
; Read only data |
55 |
;============================================================================= |
;============================================================================= |
56 |
|
|
57 |
%ifdef FORMAT_COFF |
DATA |
58 |
SECTION .rodata |
|
59 |
%else |
ALIGN SECTION_ALIGN |
|
SECTION .rodata align=16 |
|
|
%endif |
|
|
ALIGN 16 |
|
60 |
|
|
61 |
;----------------------------------------------------------------------------- |
;----------------------------------------------------------------------------- |
62 |
; RGB->YV12 multiplication matrices |
; RGB->YV12 multiplication matrices |
113 |
|
|
114 |
%macro BGR_TO_YV12 2 |
%macro BGR_TO_YV12 2 |
115 |
; y_out |
; y_out |
116 |
|
|
117 |
pxor mm4, mm4 |
pxor mm4, mm4 |
118 |
pxor mm5, mm5 |
pxor mm5, mm5 |
119 |
movd mm0, [edi] ; x_ptr[0...] |
movd mm0, [x_ptr] ; x_ptr[0...] |
120 |
movd mm2, [edi+edx] ; x_ptr[x_stride...] |
movd mm2, [x_ptr+x_stride] ; x_ptr[x_stride...] |
121 |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
122 |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
123 |
movq mm6, mm0 ; = [ |b4|g4|r4] |
movq mm6, mm0 ; = [ |b4|g4|r4] |
133 |
|
|
134 |
pxor mm4, mm4 |
pxor mm4, mm4 |
135 |
pxor mm5, mm5 |
pxor mm5, mm5 |
136 |
movd mm1, [edi+%1] ; src[%1...] |
movd mm1, [x_ptr+%1] ; src[%1...] |
137 |
movd mm3, [edi+edx+%1] ; src[x_stride+%1...] |
movd mm3, [x_ptr+x_stride+%1] ; src[x_stride+%1...] |
138 |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
139 |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
140 |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
148 |
paddd mm1, mm4 ; +[b] |
paddd mm1, mm4 ; +[b] |
149 |
paddd mm3, mm5 ; +[b] |
paddd mm3, mm5 ; +[b] |
150 |
|
|
151 |
push edx |
push x_stride |
152 |
|
|
153 |
movd edx, mm0 |
movd x_stride_d, mm0 |
154 |
shr edx, 8 |
shr x_stride, 8 |
155 |
add edx, Y_ADD |
add x_stride, Y_ADD |
156 |
mov [esi], dl ; y_ptr[0] |
mov [y_ptr], dl ; y_ptr[0] |
157 |
|
|
158 |
movd edx, mm1 |
movd x_stride_d, mm1 |
159 |
shr edx, 8 |
shr x_stride, 8 |
160 |
add edx, Y_ADD |
add x_stride, Y_ADD |
161 |
mov [esi + 1], dl ; y_ptr[1] |
mov [y_ptr + 1], dl ; y_ptr[1] |
162 |
|
|
163 |
movd edx, mm2 |
movd x_stride_d, mm2 |
164 |
shr edx, 8 |
shr x_stride, 8 |
165 |
add edx, Y_ADD |
add x_stride, Y_ADD |
166 |
mov [esi + eax + 0], dl ; y_ptr[y_stride + 0] |
mov [y_ptr + y_stride + 0], dl ; y_ptr[y_stride + 0] |
167 |
|
|
168 |
movd edx, mm3 |
movd x_stride_d, mm3 |
169 |
shr edx, 8 |
shr x_stride, 8 |
170 |
add edx, Y_ADD |
add x_stride, Y_ADD |
171 |
mov [esi + eax + 1], dl ; y_ptr[y_stride + 1] |
mov [y_ptr + y_stride + 1], dl ; y_ptr[y_stride + 1] |
172 |
|
|
173 |
; u_ptr, v_ptr |
; u_ptr, v_ptr |
174 |
movq mm0, mm6 ; = [ |b4|g4|r4] |
movq mm0, mm6 ; = [ |b4|g4|r4] |
181 |
paddd mm0, mm1 |
paddd mm0, mm1 |
182 |
paddd mm2, mm6 |
paddd mm2, mm6 |
183 |
|
|
184 |
movd edx, mm0 |
movd x_stride_d, mm0 |
185 |
shr edx, 10 |
shr x_stride, 10 |
186 |
add edx, U_ADD |
add x_stride, U_ADD |
187 |
mov [ebx], dl |
mov [u_ptr], dl |
188 |
|
|
189 |
movd edx, mm2 |
movd x_stride_d, mm2 |
190 |
shr edx, 10 |
shr x_stride, 10 |
191 |
add edx, V_ADD |
add x_stride, V_ADD |
192 |
mov [ecx], dl |
mov [v_ptr], dl |
193 |
|
|
194 |
pop edx |
pop x_stride |
195 |
%endmacro |
%endmacro |
196 |
|
|
197 |
;------------------------------------------------------------------------------ |
;------------------------------------------------------------------------------ |
211 |
; y_out |
; y_out |
212 |
pxor mm4, mm4 |
pxor mm4, mm4 |
213 |
pxor mm5, mm5 |
pxor mm5, mm5 |
214 |
movd mm0, [edi] ; x_ptr[0...] |
movd mm0, [x_ptr] ; x_ptr[0...] |
215 |
movd mm2, [edi+edx] ; x_ptr[x_stride...] |
movd mm2, [x_ptr+x_stride] ; x_ptr[x_stride...] |
216 |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
punpcklbw mm0, mm4 ; [ |b |g |r ] |
217 |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
punpcklbw mm2, mm5 ; [ |b |g |r ] |
218 |
movq mm6, mm0 ; = [ |b4|g4|r4] |
movq mm6, mm0 ; = [ |b4|g4|r4] |
228 |
|
|
229 |
pxor mm4, mm4 |
pxor mm4, mm4 |
230 |
pxor mm5, mm5 |
pxor mm5, mm5 |
231 |
movd mm1, [edi+%1] ; src[%1...] |
movd mm1, [x_ptr+%1] ; src[%1...] |
232 |
movd mm3, [edi+edx+%1] ; src[x_stride+%1...] |
movd mm3, [x_ptr+x_stride+%1] ; src[x_stride+%1...] |
233 |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
punpcklbw mm1, mm4 ; [ |b |g |r ] |
234 |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
punpcklbw mm3, mm5 ; [ |b |g |r ] |
235 |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
paddw mm6, mm1 ; +[ |b4|g4|r4] |
243 |
paddd mm1, mm4 ; +[b] |
paddd mm1, mm4 ; +[b] |
244 |
paddd mm3, mm5 ; +[b] |
paddd mm3, mm5 ; +[b] |
245 |
|
|
246 |
push edx |
push x_stride |
247 |
|
|
248 |
movd edx, mm0 |
movd x_stride_d, mm0 |
249 |
shr edx, 8 |
shr x_stride, 8 |
250 |
add edx, Y_ADD |
add x_stride, Y_ADD |
251 |
mov [esi], dl ; y_ptr[0] |
mov [y_ptr], dl ; y_ptr[0] |
252 |
|
|
253 |
movd edx, mm1 |
movd x_stride_d, mm1 |
254 |
shr edx, 8 |
shr x_stride, 8 |
255 |
add edx, Y_ADD |
add x_stride, Y_ADD |
256 |
mov [esi + 1], dl ; y_ptr[1] |
mov [y_ptr + 1], dl ; y_ptr[1] |
257 |
|
|
258 |
movd edx, mm2 |
movd x_stride_d, mm2 |
259 |
shr edx, 8 |
shr x_stride, 8 |
260 |
add edx, Y_ADD |
add x_stride, Y_ADD |
261 |
mov [esi + eax + 0], dl ; y_ptr[y_stride + 0] |
mov [y_ptr + y_stride + 0], dl ; y_ptr[y_stride + 0] |
262 |
|
|
263 |
movd edx, mm3 |
movd x_stride_d, mm3 |
264 |
shr edx, 8 |
shr x_stride, 8 |
265 |
add edx, Y_ADD |
add x_stride, Y_ADD |
266 |
mov [esi + eax + 1], dl ; y_ptr[y_stride + 1] |
mov [y_ptr + y_stride + 1], dl ; y_ptr[y_stride + 1] |
267 |
|
|
268 |
; u_ptr, v_ptr |
; u_ptr, v_ptr |
269 |
movq mm0, mm6 ; = [ |b4|g4|r4] |
movq mm0, mm6 ; = [ |b4|g4|r4] |
276 |
paddd mm0, mm1 |
paddd mm0, mm1 |
277 |
paddd mm2, mm6 |
paddd mm2, mm6 |
278 |
|
|
279 |
movd edx, mm0 |
movd x_stride_d, mm0 |
280 |
shr edx, 10 |
shr x_stride, 10 |
281 |
add edx, U_ADD |
add x_stride, U_ADD |
282 |
mov [ebx], dl |
mov [u_ptr], dl |
283 |
|
|
284 |
movd edx, mm2 |
movd x_stride_d, mm2 |
285 |
shr edx, 10 |
shr x_stride, 10 |
286 |
add edx, V_ADD |
add x_stride, V_ADD |
287 |
mov [ecx], dl |
mov [v_ptr], dl |
288 |
|
|
289 |
pop edx |
pop x_stride |
290 |
%endmacro |
%endmacro |
291 |
|
|
292 |
;------------------------------------------------------------------------------ |
;------------------------------------------------------------------------------ |
302 |
%endmacro |
%endmacro |
303 |
|
|
304 |
%macro YV12_TO_BGR 2 |
%macro YV12_TO_BGR 2 |
305 |
%define TEMP_Y1 esp |
%define TEMP_Y1 _ESP |
306 |
%define TEMP_Y2 esp + 8 |
%define TEMP_Y2 _ESP + 8 |
307 |
%define TEMP_G1 esp + 16 |
%define TEMP_G1 _ESP + 16 |
308 |
%define TEMP_G2 esp + 24 |
%define TEMP_G2 _ESP + 24 |
309 |
%define TEMP_B1 esp + 32 |
%define TEMP_B1 _ESP + 32 |
310 |
%define TEMP_B2 esp + 40 |
%define TEMP_B2 _ESP + 40 |
311 |
|
|
312 |
movd mm2, [ebx] ; u_ptr[0] |
movd mm2, [u_ptr] ; u_ptr[0] |
313 |
movd mm3, [ecx] ; v_ptr[0] |
movd mm3, [v_ptr] ; v_ptr[0] |
314 |
punpcklbw mm2, mm7 ; u3u2u1u0 -> mm2 |
punpcklbw mm2, mm7 ; u3u2u1u0 -> mm2 |
315 |
punpcklbw mm3, mm7 ; v3v2v1v0 -> mm3 |
punpcklbw mm3, mm7 ; v3v2v1v0 -> mm3 |
316 |
psubsw mm2, [U_SUB] ; U - 128 |
psubsw mm2, [U_SUB] ; U - 128 |
329 |
paddsw mm2, mm3 |
paddsw mm2, mm3 |
330 |
paddsw mm6, mm0 |
paddsw mm6, mm0 |
331 |
pmullw mm5, [VR_MUL] ; R_ADD -> mm5 |
pmullw mm5, [VR_MUL] ; R_ADD -> mm5 |
332 |
movq mm0, [esi] ; y7y6y5y4y3y2y1y0 -> mm0 |
movq mm0, [y_ptr] ; y7y6y5y4y3y2y1y0 -> mm0 |
333 |
movq mm1, mm0 |
movq mm1, mm0 |
334 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
335 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
345 |
psraw mm0, SCALEBITS |
psraw mm0, SCALEBITS |
346 |
packuswb mm0, mm1 ;g7g6g5g4g3g2g1g0 -> mm0 |
packuswb mm0, mm1 ;g7g6g5g4g3g2g1g0 -> mm0 |
347 |
movq [TEMP_G1], mm0 |
movq [TEMP_G1], mm0 |
348 |
movq mm0, [esi+eax] ; y7y6y5y4y3y2y1y0 -> mm0 |
movq mm0, [y_ptr+y_stride] ; y7y6y5y4y3y2y1y0 -> mm0 |
349 |
movq mm1, mm0 |
movq mm1, mm0 |
350 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
punpckhbw mm1, mm7 ; y7y6y5y4 -> mm1 |
351 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
punpcklbw mm0, mm7 ; y3y2y1y0 -> mm0 |
412 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
413 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
414 |
%if %1 == 3 ; BGR (24-bit) |
%if %1 == 3 ; BGR (24-bit) |
415 |
movd [edi], mm2 |
movd [x_ptr], mm2 |
416 |
psrlq mm2, 32 |
psrlq mm2, 32 |
417 |
movd [edi + 3], mm2 |
movd [x_ptr + 3], mm2 |
418 |
movd [edi + 6], mm4 |
movd [x_ptr + 6], mm4 |
419 |
psrlq mm4, 32 |
psrlq mm4, 32 |
420 |
movd [edi + 9], mm4 |
movd [x_ptr + 9], mm4 |
421 |
movd [edi + 12], mm0 |
movd [x_ptr + 12], mm0 |
422 |
psrlq mm0, 32 |
psrlq mm0, 32 |
423 |
movd [edi + 15], mm0 |
movd [x_ptr + 15], mm0 |
424 |
movq mm2, mm5 |
movq mm2, mm5 |
425 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
426 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
429 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
430 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
431 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
432 |
movq [edi + 16], mm5 |
movq [x_ptr + 16], mm5 |
433 |
movq mm0, [TEMP_B2] |
movq mm0, [TEMP_B2] |
434 |
movq mm1, [TEMP_G2] |
movq mm1, [TEMP_G2] |
435 |
movq mm2, mm0 |
movq mm2, mm0 |
444 |
movq mm5, mm0 |
movq mm5, mm0 |
445 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
446 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
447 |
movd [edi+edx], mm2 |
movd [x_ptr+x_stride], mm2 |
448 |
psrlq mm2, 32 |
psrlq mm2, 32 |
449 |
movd [edi+edx + 3], mm2 |
movd [x_ptr+x_stride + 3], mm2 |
450 |
movd [edi+edx + 6], mm4 |
movd [x_ptr+x_stride + 6], mm4 |
451 |
psrlq mm4, 32 |
psrlq mm4, 32 |
452 |
movd [edi+edx + 9], mm4 |
movd [x_ptr+x_stride + 9], mm4 |
453 |
movd [edi+edx + 12], mm0 |
movd [x_ptr+x_stride + 12], mm0 |
454 |
psrlq mm0, 32 |
psrlq mm0, 32 |
455 |
movd [edi+edx + 15], mm0 |
movd [x_ptr+x_stride + 15], mm0 |
456 |
movq mm2, mm5 |
movq mm2, mm5 |
457 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
psrlq mm0, 8 ; 000000r5g5 -> mm0 |
458 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
psllq mm2, 32 ; 0r6g6b60000 -> mm2 |
461 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
por mm0, mm2 ; 000r6g6b6r5g5 -> mm0 |
462 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
psllq mm5, 40 ; r7g7b700000 -> mm5 |
463 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
por mm5, mm0 ; r7g7b7r6g6b6r5g5 -> mm5 |
464 |
movq [edi + edx + 16], mm5 |
movq [x_ptr + x_stride + 16], mm5 |
465 |
|
|
466 |
%else ; BGRA (32-bit) |
%else ; BGRA (32-bit) |
467 |
movq [edi], mm2 |
movq [x_ptr], mm2 |
468 |
movq [edi + 8], mm4 |
movq [x_ptr + 8], mm4 |
469 |
movq [edi + 16], mm0 |
movq [x_ptr + 16], mm0 |
470 |
movq [edi + 24], mm5 |
movq [x_ptr + 24], mm5 |
471 |
movq mm0, [TEMP_B2] |
movq mm0, [TEMP_B2] |
472 |
movq mm1, [TEMP_G2] |
movq mm1, [TEMP_G2] |
473 |
movq mm2, mm0 |
movq mm2, mm0 |
482 |
movq mm5, mm0 |
movq mm5, mm0 |
483 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
punpcklbw mm0, mm3 ; 0r5g5b50r4g4b4 -> mm0 |
484 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
punpckhbw mm5, mm3 ; 0r7g7b70r6g6b6 -> mm5 |
485 |
movq [edi + edx], mm2 |
movq [x_ptr + x_stride], mm2 |
486 |
movq [edi + edx + 8], mm4 |
movq [x_ptr + x_stride + 8], mm4 |
487 |
movq [edi + edx + 16], mm0 |
movq [x_ptr + x_stride + 16], mm0 |
488 |
movq [edi + edx + 24], mm5 |
movq [x_ptr + x_stride + 24], mm5 |
489 |
%endif |
%endif |
490 |
|
|
491 |
%undef TEMP_Y1 |
%undef TEMP_Y1 |
500 |
; Code |
; Code |
501 |
;============================================================================= |
;============================================================================= |
502 |
|
|
503 |
SECTION .text |
TEXT |
504 |
|
|
505 |
%include "colorspace_mmx.inc" |
%include "colorspace_mmx.inc" |
506 |
|
|
514 |
MAKE_COLORSPACE yv12_to_bgr_mmx,48, 3,8,2, YV12_TO_BGR, 3, -1 |
MAKE_COLORSPACE yv12_to_bgr_mmx,48, 3,8,2, YV12_TO_BGR, 3, -1 |
515 |
MAKE_COLORSPACE yv12_to_bgra_mmx,48, 4,8,2, YV12_TO_BGR, 4, -1 |
MAKE_COLORSPACE yv12_to_bgra_mmx,48, 4,8,2, YV12_TO_BGR, 4, -1 |
516 |
|
|
517 |
|
|
518 |
|
%ifidn __OUTPUT_FORMAT__,elf |
519 |
|
section ".note.GNU-stack" noalloc noexec nowrite progbits |
520 |
|
%endif |
521 |
|
|