; these 3dne functions are compatible with iSSE, but are optimized specifically
; for K7 pipelines

%include "nasm.inc"

;=============================================================================
; Read only data
;=============================================================================

DATA

ALIGN SECTION_ALIGN
mm_zero:
  dd 0,0
;=============================================================================
; Macros
;=============================================================================

%ifdef ARCH_IS_X86_64
%define nop4
%else
%macro nop4 0
  db 08Dh, 074h, 026h, 0
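  ; the four bytes encode "lea esi,[esi+0x0]", a 4-byte no-op used as
  ; instruction padding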
%endmacro
%endif

;=============================================================================
; Code
;=============================================================================

TEXT

cglobal transfer_8to16copy_3dne
cglobal transfer_16to8copy_3dne
cglobal transfer_8to16sub_3dne
cglobal transfer_8to16subro_3dne
cglobal transfer_8to16sub2_3dne
cglobal transfer_16to8add_3dne
cglobal transfer8x8_copy_3dne
cglobal transfer8x4_copy_3dne

;-----------------------------------------------------------------------------
;
; void transfer_8to16copy_3dne(int16_t * const dst,
;                              const uint8_t * const src,
;                              uint32_t stride);
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
transfer_8to16copy_3dne:
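  ; note the zero-extend trick used below: "punpcklbw mmN, [mem]" puts the
  ; source bytes into the high byte of each word, and the later "psrlw mmN, 8"
  ; shifts them down, avoiding a separate load plus unpack against mm7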

  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride
  mov TMP0, prm1 ; Dst
  punpcklbw mm0, [byte _EAX]
  punpcklbw mm1, [_EAX+4]
  movq mm2, [_EAX+TMP1]
  movq mm3, [_EAX+TMP1]
  pxor mm7, mm7
  lea _EAX, [_EAX+2*TMP1]
  punpcklbw mm2, mm7
  punpckhbw mm3, mm7
  psrlw mm0, 8
  psrlw mm1, 8
  punpcklbw mm4, [_EAX]
  punpcklbw mm5, [_EAX+TMP1+4]
  movq [byte TMP0+0*64], mm0
  movq [TMP0+0*64+8], mm1
  punpcklbw mm6, [_EAX+TMP1]
  punpcklbw mm7, [_EAX+4]
  lea _EAX, [byte _EAX+2*TMP1]
  psrlw mm4, 8
  psrlw mm5, 8
  punpcklbw mm0, [_EAX]
  punpcklbw mm1, [_EAX+TMP1+4]
  movq [TMP0+0*64+16], mm2
  movq [TMP0+0*64+24], mm3
  psrlw mm6, 8
  psrlw mm7, 8
  punpcklbw mm2, [_EAX+TMP1]
  punpcklbw mm3, [_EAX+4]
  lea _EAX, [byte _EAX+2*TMP1]
  movq [byte TMP0+0*64+32], mm4
  movq [TMP0+0*64+56], mm5
  psrlw mm0, 8
  psrlw mm1, 8
  punpcklbw mm4, [_EAX]
  punpcklbw mm5, [_EAX+TMP1+4]
  movq [byte TMP0+0*64+48], mm6
  movq [TMP0+0*64+40], mm7
  psrlw mm2, 8
  psrlw mm3, 8
  punpcklbw mm6, [_EAX+TMP1]
  punpcklbw mm7, [_EAX+4]
  movq [byte TMP0+1*64], mm0
  movq [TMP0+1*64+24], mm1
  psrlw mm4, 8
  psrlw mm5, 8
  movq [TMP0+1*64+16], mm2
  movq [TMP0+1*64+8], mm3
  psrlw mm6, 8
  psrlw mm7, 8
  movq [byte TMP0+1*64+32], mm4
  movq [TMP0+1*64+56], mm5
  movq [byte TMP0+1*64+48], mm6
  movq [TMP0+1*64+40], mm7
  ret
ENDFUNC


;-----------------------------------------------------------------------------
;
; void transfer_16to8copy_3dne(uint8_t * const dst,
;                              const int16_t * const src,
;                              uint32_t stride);
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
transfer_16to8copy_3dne:
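  ; packuswb saturates each signed 16-bit coefficient to the 0..255 byte
  ; range while packing, which is the clamping the 16-to-8 copy needs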

  mov _EAX, prm2 ; Src
  mov TMP0, prm1 ; Dst
  mov TMP1, prm3 ; Stride

  movq mm0, [byte _EAX+0*32]
  packuswb mm0, [_EAX+0*32+8]
  movq mm1, [_EAX+0*32+16]
  packuswb mm1, [_EAX+0*32+24]
  movq mm5, [_EAX+2*32+16]
  movq mm2, [_EAX+1*32]
  packuswb mm2, [_EAX+1*32+8]
  movq mm3, [_EAX+1*32+16]
  packuswb mm3, [_EAX+1*32+24]
  movq mm6, [_EAX+3*32]
  movq mm4, [_EAX+2*32]
  packuswb mm4, [_EAX+2*32+8]
  packuswb mm5, [_EAX+2*32+24]
  movq mm7, [_EAX+3*32+16]
  packuswb mm7, [_EAX+3*32+24]
  packuswb mm6, [_EAX+3*32+8]
  movq [TMP0], mm0
  lea _EAX, [3*TMP1]
  add _EAX, TMP0
  movq [TMP0+TMP1], mm1
  movq [TMP0+2*TMP1], mm2
  movq [byte _EAX], mm3
  movq [TMP0+4*TMP1], mm4
  lea TMP0, [byte TMP0+4*TMP1]
  movq [_EAX+2*TMP1], mm5
  movq [_EAX+4*TMP1], mm7
  movq [TMP0+2*TMP1], mm6
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer_8to16sub_3dne(int16_t * const dct,
;                             uint8_t * const cur,
;                             const uint8_t * const ref,
;                             const uint32_t stride);
;
;-----------------------------------------------------------------------------

; when the second argument == 1, the reference block is also copied back into
; the current (_EAX) block
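; Each expansion converts two rows: it loads 8 current and 8 reference pixels,
; widens them to words, and stores the differences cur-ref at dct+%1*32; four
; expansions (%1 = 0..3) cover an 8x8 block. For %1 == 3 the %else branch pops
; the register saved in the function prologue into TMP0, so the epilogue can
; restore it with "mov _EDI, TMP0".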
%macro COPY_8_TO_16_SUB 2
  movq mm1, [_EAX] ; cur
  movq mm0, mm1
  movq mm4, [TMP0] ; ref
  movq mm6, mm4
%if %2 == 1
  movq [_EAX], mm4
%endif
  punpckhbw mm1, mm7
  punpckhbw mm6, mm7
  punpcklbw mm4, mm7
ALIGN SECTION_ALIGN
  movq mm2, [byte _EAX+TMP1]
  punpcklbw mm0, mm7
  movq mm3, [byte _EAX+TMP1]
  punpcklbw mm2, mm7
  movq mm5, [byte TMP0+TMP1] ; ref
  punpckhbw mm3, mm7
%if %2 == 1
  movq [byte _EAX+TMP1], mm5
%endif
  psubsw mm1, mm6

  movq mm6, mm5
  psubsw mm0, mm4
%if (%1 < 3)
  lea _EAX,[_EAX+2*TMP1]
  lea TMP0,[TMP0+2*TMP1]
%else
  mov TMP0,[_ESP]
  add _ESP,byte PTR_SIZE
%endif
  movq [_EDI+%1*32+ 8], mm1
  movq [byte _EDI+%1*32+ 0], mm0 ; dst
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  psubsw mm3, mm6
  movq [_EDI+%1*32+16], mm2
  movq [_EDI+%1*32+24], mm3
%endmacro

ALIGN SECTION_ALIGN
transfer_8to16sub_3dne:
  mov _EAX, prm2 ; Cur
  mov TMP0, prm3 ; Ref
  mov TMP1, prm4 ; Stride

  push _EDI
%ifdef ARCH_IS_X86_64
  mov _EDI, prm1
%else
  mov _EDI, [_ESP+4+4] ; Dst
%endif

  pxor mm7, mm7
  nop
ALIGN SECTION_ALIGN
  COPY_8_TO_16_SUB 0, 1
  COPY_8_TO_16_SUB 1, 1
  COPY_8_TO_16_SUB 2, 1
  COPY_8_TO_16_SUB 3, 1
  mov _EDI, TMP0
  ret
ENDFUNC
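
; transfer_8to16subro_3dne is the read-only variant: it expands
; COPY_8_TO_16_SUB with %2 == 0, so the reference rows are not written back
; into the current block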

ALIGN SECTION_ALIGN
transfer_8to16subro_3dne:
  mov _EAX, prm2 ; Cur
  mov TMP0, prm3 ; Ref
  mov TMP1, prm4 ; Stride

  push _EDI
%ifdef ARCH_IS_X86_64
  mov _EDI, prm1
%else
  mov _EDI, [_ESP+4+ 4] ; Dst
%endif

  pxor mm7, mm7
  nop
ALIGN SECTION_ALIGN
  COPY_8_TO_16_SUB 0, 0
  COPY_8_TO_16_SUB 1, 0
  COPY_8_TO_16_SUB 2, 0
  COPY_8_TO_16_SUB 3, 0
  mov _EDI, TMP0
  ret
ENDFUNC


;-----------------------------------------------------------------------------
;
; void transfer_8to16sub2_3dne(int16_t * const dct,
;                              uint8_t * const cur,
;                              const uint8_t * ref1,
;                              const uint8_t * ref2,
;                              const uint32_t stride);
;
;-----------------------------------------------------------------------------
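
; COPY_8_TO_16_SUB2_SSE averages ref1 (_EBX) and ref2 (_ESI) with pavgb,
; writes the rounded average back to the current block, and stores cur-avg as
; 16-bit differences at dct+%1*32, two rows per expansion. For %1 == 3 it
; reloads _ESI and _EBX from the stack, undoing the two pushes in the
; prologue.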

%macro COPY_8_TO_16_SUB2_SSE 1
  db 0Fh, 6Fh, 44h, 20h, 00 ;movq mm0, [byte _EAX] ; cur
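  ; this and the 4Ch variant below are hand-assembled "movq mmN, [_EAX+0]"
  ; with an explicit disp8, padding the instruction to 5 bytes; presumably a
  ; decode-alignment trick for the K7 pipeline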
  punpcklbw mm0, mm7
  movq mm2, [byte _EAX+TMP1]
  punpcklbw mm2, mm7
  db 0Fh, 6Fh, 4ch, 20h, 00 ;movq mm1, [byte _EAX]
  punpckhbw mm1, mm7
  movq mm3, [byte _EAX+TMP1]
  punpckhbw mm3, mm7

  movq mm4, [byte _EBX] ; ref1
  pavgb mm4, [byte _ESI] ; ref2
  movq [_EAX], mm4
  movq mm5, [_EBX+TMP1] ; ref
  pavgb mm5, [_ESI+TMP1] ; ref2
  movq [_EAX+TMP1], mm5
  movq mm6, mm4
  punpcklbw mm4, mm7
  punpckhbw mm6, mm7
%if (%1 < 3)
  lea _ESI,[_ESI+2*TMP1]
  lea _EBX,[byte _EBX+2*TMP1]
  lea _EAX,[_EAX+2*TMP1]
%else
  mov _ESI,[_ESP]
  mov _EBX,[_ESP+PTR_SIZE]
  add _ESP,byte 2*PTR_SIZE
%endif
  psubsw mm0, mm4
  psubsw mm1, mm6
  movq mm6, mm5
  punpcklbw mm5, mm7
  punpckhbw mm6, mm7
  psubsw mm2, mm5
  psubsw mm3, mm6
  movq [byte TMP0+%1*32+ 0], mm0 ; dst
  movq [TMP0+%1*32+ 8], mm1
  movq [TMP0+%1*32+16], mm2
  movq [TMP0+%1*32+24], mm3
%endmacro

ALIGN SECTION_ALIGN
transfer_8to16sub2_3dne:
  mov TMP1d, prm5d ; Stride
  mov TMP0, prm1 ; Dst
  mov _EAX, prm2 ; Cur
  push _EBX
  lea _EBP,[byte _EBP]

%ifdef ARCH_IS_X86_64
  mov _EBX, prm3
%else
  mov _EBX, [_ESP+4+12] ; Ref1
%endif

  push _ESI
  pxor mm7, mm7

%ifdef ARCH_IS_X86_64
  mov _ESI, prm4
%else
  mov _ESI, [_ESP+8+16] ; Ref2
%endif

  nop4
  COPY_8_TO_16_SUB2_SSE 0
  COPY_8_TO_16_SUB2_SSE 1
  COPY_8_TO_16_SUB2_SSE 2
  COPY_8_TO_16_SUB2_SSE 3

  ret
ENDFUNC


;-----------------------------------------------------------------------------
;
; void transfer_16to8add_3dne(uint8_t * const dst,
;                             const int16_t * const src,
;                             uint32_t stride);
;
;-----------------------------------------------------------------------------
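
; COPY_16_TO_8_ADD adds the 16-bit residue at src+%1*32 to two 8-byte rows of
; the prediction in dst, saturating the sums back to unsigned bytes via
; packuswb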

%macro COPY_16_TO_8_ADD 1
  movq mm0, [byte TMP0]
  punpcklbw mm0, mm7
  movq mm2, [byte TMP0+TMP1]
  punpcklbw mm2, mm7
  movq mm1, [byte TMP0]
  punpckhbw mm1, mm7
  movq mm3, [byte TMP0+TMP1]
  punpckhbw mm3, mm7
  paddsw mm0, [byte _EAX+%1*32+ 0]
  paddsw mm1, [_EAX+%1*32+ 8]
  paddsw mm2, [_EAX+%1*32+16]
  paddsw mm3, [_EAX+%1*32+24]
  packuswb mm0, mm1
  packuswb mm2, mm3
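  ; "mov _ESP,_ESP" below is a semantic no-op, apparently kept as a small
  ; scheduling filler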
  mov _ESP, _ESP
  movq [byte TMP0], mm0
  movq [TMP0+TMP1], mm2
%endmacro


ALIGN SECTION_ALIGN
transfer_16to8add_3dne:
  mov TMP0, prm1 ; Dst
  mov TMP1, prm3 ; Stride
  mov _EAX, prm2 ; Src
  pxor mm7, mm7
  nop

  COPY_16_TO_8_ADD 0
  lea TMP0,[byte TMP0+2*TMP1]
  COPY_16_TO_8_ADD 1
  lea TMP0,[byte TMP0+2*TMP1]
  COPY_16_TO_8_ADD 2
  lea TMP0,[byte TMP0+2*TMP1]
  COPY_16_TO_8_ADD 3
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer8x8_copy_3dne(uint8_t * const dst,
;                            const uint8_t * const src,
;                            const uint32_t stride);
;
;
;-----------------------------------------------------------------------------
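
; COPY_8_TO_8 copies two 8-byte rows from src (_EAX) to dst (TMP0) and
; advances only the source pointer; the destination pointer is advanced by
; the caller between expansions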

%macro COPY_8_TO_8 0
  movq mm0, [byte _EAX]
  movq mm1, [_EAX+TMP1]
  movq [byte TMP0], mm0
  lea _EAX,[byte _EAX+2*TMP1]
  movq [TMP0+TMP1], mm1
%endmacro

ALIGN SECTION_ALIGN
transfer8x8_copy_3dne:
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride
  mov TMP0, prm1 ; Dst

  COPY_8_TO_8
  lea TMP0,[byte TMP0+2*TMP1]
  COPY_8_TO_8
  lea TMP0,[byte TMP0+2*TMP1]
  COPY_8_TO_8
  lea TMP0,[byte TMP0+2*TMP1]
  COPY_8_TO_8
  ret
ENDFUNC

;-----------------------------------------------------------------------------
;
; void transfer8x4_copy_3dne(uint8_t * const dst,
;                            const uint8_t * const src,
;                            const uint32_t stride);
;
;
;-----------------------------------------------------------------------------

ALIGN SECTION_ALIGN
transfer8x4_copy_3dne:
  mov _EAX, prm2 ; Src
  mov TMP1, prm3 ; Stride
  mov TMP0, prm1 ; Dst

  COPY_8_TO_8
  lea TMP0,[byte TMP0+2*TMP1]
  COPY_8_TO_8
  ret
ENDFUNC

NON_EXEC_STACK