# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# "[Re]written" was achieved in two major overhauls. In 2004 the BODY_*
# functions were re-implemented to address a P4 performance issue [see
# commentary below], and in 2006 the rest was rewritten in order to
# gain the freedom to liberalize the licensing terms.
#
# January, September 2004.
#
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on the P4 CPU than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in the following
# performance changes:
#
#		compared with original	compared with Intel cc
#		assembler impl.		generated code
#
# As you can see, the Pentium came out as the loser:-( Yet I reckoned
# that the improvement on P4 outweighs the loss and incorporated the
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------
# <appro@fy.chalmers.se>
#
# George Spelvin pointed out that F_40_59(b,c,d) can be rewritten as
# '(c&d) + (b&(c^d))', which makes it possible to accumulate partial
# results and lighten the "pressure" on scratch registers (a
# self-check of the identity follows below). This resulted in a >12%
# performance improvement on contemporary AMD cores (with no
# degradation on other CPUs:-). Also, the code was revised to maximize
# the "distance" between instructions producing input to the 'lea'
# instruction and the 'lea' instruction itself, which is essential for
# the Intel Atom core and resulted in a ~15% improvement.
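#
# The identity is easy to sanity-check: for every bit position, c&d
# and b&(c^d) are never both set, so '+' coincides with '|' and with
# the textbook MAJ(b,c,d) = (b&c)|(b&d)|(c&d). A minimal illustrative
# self-check, not used by the generator:
sub __check_F_40_59 {
    for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
	my $maj = ($b&$c)|($b&$d)|($c&$d);	# textbook majority
	my $alt = ($c&$d) + ($b&($c^$d));	# reordered form above
	die "F_40_59 mismatch" if ($maj != $alt);
    }}}
    return 1;
}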
#
# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in the OpenSSL source (sketched below), to
# the SIMD unit. The idea is not novel, and in the SSE2 context was
# first explored by Dean Gaudet in 2004, see
# http://arctic.org/~dean/crypto/sha1.html. Since then several things
# have changed that made it interesting again:
#
# a) XMM units became faster and wider;
# b) instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
#    it possible to reduce the number of instructions required to
#    perform the operation in question, for further details see
#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
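#
# For reference, the scalar recurrence being offloaded is
# W[t] = ROTATE(W[t-3]^W[t-8]^W[t-14]^W[t-16],1) for 16<=t<80. A
# minimal scalar model of what the SIMD code computes four elements at
# a time, illustrative only and not used by the generator:
sub __Xupdate_model {
    my @W = @_;				# W[0..15], 32-bit words
    for (my $t=16; $t<80; $t++) {
	my $x = $W[$t-3]^$W[$t-8]^$W[$t-14]^$W[$t-16];
	$W[$t] = (($x<<1)|($x>>31)) & 0xffffffff;	# ROTATE(x,1)
    }
    return @W;
}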
#
# Add AVX code path, probably the most controversial... The thing is
# that switching to AVX alone improves performance by as little as 4%
# in comparison to the SSSE3 code path. But the result below doesn't
# look like a 4% improvement... The trouble is that Sandy Bridge
# decodes 'ro[rl]' as a pair of µ-ops, and it's the additional µ-ops,
# two per round, that make it run slower than Core2 and Westmere. But
# 'sh[rl]d' is decoded as a single µ-op by Sandy Bridge, and it's
# replacing 'ro[rl]' with the equivalent 'sh[rl]d' (modeled below)
# that is responsible for the impressive 5.1 cycles per processed
# byte. But 'sh[rl]d' is not something that used to be fast, nor does
# it appear to be fast in the upcoming Bulldozer [according to its
# optimization manual]. This is why the AVX code path is guarded by
# *both* the AVX bit and the synthetic bit denoting Intel CPUs. One
# can argue that it's unfair to AMD, but without 'sh[rl]d' it makes no
# sense to keep the AVX code path. If somebody feels strongly about
# it, it's probably more appropriate to discuss the possibility of
# using the XOP vector rotate on AMD...
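#
# For reference, with both operands being the same register,
# 'shld %reg,%reg,n' computes the same function as 'rol %reg,n':
# both evaluate (x<<n)|(x>>(32-n)) on a 32-bit value. A minimal
# illustrative model, not used by the generator:
sub __rol32_model {
    my ($x,$n) = @_;
    return ((($x&0xffffffff)<<$n)|(($x&0xffffffff)>>(32-$n))) & 0xffffffff;
}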
#
# Add support for Intel SHA Extensions.

######################################################################
# Current performance is summarized in the following table. Numbers
# are CPU clock cycles spent to process a single byte (less is
# better).
#
#		x86		SSSE3		AVX
# Core2		7.3		6.0/+22%	-
# Atom		12.5		9.3(*)/+35%	-
# Westmere	7.3		5.5/+33%	-
# Sandy Bridge	8.8		6.2/+40%	5.1(**)/+73%
# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
# Haswell	6.5		4.3/+51%	4.1(**)/+58%
# Bulldozer	11.6		6.0/+92%
# VIA Nano	10.6		7.5/+41%
#
# (*)	Loop is 1056 instructions long and the expected result is
#	~8.25. It remains a mystery [to me] why ILP is limited to 1.7.
#
# (**)	As per the above comment, the result is for AVX *plus* sh[rl]d.

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");

$xmm=$ymm=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

$ymm=1 if ($xmm &&
	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
	$1>=2.19);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
	$1>=2.03);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./ &&
	$1>=10);	# first version supporting AVX

$shaext=$xmm;	### set to zero if compiling for 1.0.1

&external_label("OPENSSL_ia32cap_P") if ($xmm);

$A="eax";
$B="ebx";
$C="ecx";
$D="edx";
$E="edi";
$T="esi";
$tmp1="ebp";

@V=($A,$B,$C,$D,$E,$T);

$alt=0;	# 1 denotes alternative IALU implementation, which performs
	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
	# Sandy Bridge...

sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("00_15 $n");

	&mov($f,$c);			# f to hold F_00_19(b,c,d)
	if ($n==0) { &mov($tmp1,$a); }
	else       { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	&xor($f,$d);
	&add($tmp1,$e);			# tmp1+=e;
	&mov($e,&swtmp($n%16));		# e becomes volatile and is loaded
					# with xi, also note that e becomes
					# f in next round...
	&and($f,$b);
	&rotr($b,2);			# b=ROTATE(b,30)
	&xor($f,$d);			# f holds F_00_19(b,c,d)
	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi

	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
		      &add($f,$tmp1); }	# f+=tmp1
	else        { &add($tmp1,$f); }	# f becomes a in next round
	&mov($tmp1,$a)			if ($alt && $n==15);
	}
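
# For reference, each of the rounds 0..19 realized above computes, in
# scalar form, e = ROTATE(a,5) + F_00_19(b,c,d) + e + K_00_19 + X[n]
# and b = ROTATE(b,30), after which the variables rotate. A minimal
# illustrative model, not used by the generator:
sub __round_00_19_model {
    my ($a,$b,$c,$d,$e,$xi) = @_;	# 32-bit values
    my $f = ((($c^$d)&$b)^$d);				# F_00_19(b,c,d)
    my $rol5 = (($a<<5)|(($a&0xffffffff)>>27)) & 0xffffffff;
    $e = ($e + $f + $rol5 + 0x5a827999 + $xi) & 0xffffffff;
    $b = (($b<<30)|(($b&0xffffffff)>>2)) & 0xffffffff;	# ROTATE(b,30)
    return ($e,$a,$b,$c,$d);	# (a,b,c,d,e) for the next round
}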

sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&xor($c,$d);			# restore $c
	&mov($tmp1,$a);			# b in next round
	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&mov($tmp1,$a);
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($tmp1,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+8)%16));
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&mov($tmp1,$a);			# b in next round
	&rotr($b,7);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if($n==39);# warm up for BODY_40_59
	&and($tmp1,$b)			if($n==39);
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
	&rotr($a,5)			if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($tmp1,$a);
	&rotl($tmp1,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}
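
# For reference, the same body serves rounds 20-39 and 60-79; only the
# round constant $K differs. The full schedule of logical functions
# and constants realized by the BODY_* subs:
#
#	rounds  0-19:	F = (b&(c^d))^d			K = 0x5a827999
#	rounds 20-39:	F = b^c^d			K = 0x6ed9eba1
#	rounds 40-59:	F = (b&c)|(b&d)|(c&d)		K = 0x8f1bbcdc
#	rounds 60-79:	F = b^c^d			K = 0xca62c1d6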

sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&xor($c,$d);			# restore $c
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&and($tmp1,$c);
	&rotr($b,7);			# b=ROTATE(b,30)
	&add($e,$tmp1);			# e+=c&d
	&mov($tmp1,$a);			# b in next round
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if ($n<59);
	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
	&lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($tmp1,$e);			# b&(c^d)+=e
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($e,$a);			# e becomes volatile
	&rotl($e,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f);		# xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	&mov($tmp1,$c);
	&add($f,$e);			# f+=ROTATE(a,5)
	&and($tmp1,$d);
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=c&d
}
	}

&function_begin("sha1_block_data_order");
if ($xmm) {
  &static_label("shaext_shortcut")	if ($shaext);
  &static_label("ssse3_shortcut");
  &static_label("avx_shortcut")		if ($ymm);
  &static_label("K_XX_XX");

	&call	(&label("pic_point"));	# make it PIC!
  &set_label("pic_point");

	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));

	&mov	($A,&DWP(0,$T));
	&mov	($D,&DWP(4,$T));
	&test	($D,1<<9);		# check SSSE3 bit
	&jz	(&label("x86"));
	&mov	($C,&DWP(8,$T));
	&test	($A,1<<24);		# check FXSR bit
	&jz	(&label("x86"));
if ($shaext) {
	&test	($C,1<<29);		# check SHA bit
	&jnz	(&label("shaext_shortcut"));
}
if ($ymm) {
	&and	($D,1<<28);		# mask AVX bit
	&and	($A,1<<30);		# mask "Intel CPU" bit
	&or	($A,$D);
	&cmp	($A,1<<28|1<<30);
	&je	(&label("avx_shortcut"));
}
	&jmp	(&label("ssse3_shortcut"));
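
	# The OPENSSL_ia32cap_P bits consulted above: word 0 bit 24 is
	# FXSR and word 0 bit 30 is the synthetic "genuine Intel" bit;
	# word 1 bit 9 is SSSE3 and word 1 bit 28 is AVX; word 2 bit 29
	# is the SHA extension bit.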

&set_label("x86",16);
}
	&mov($tmp1,&wparam(0));	# SHA_CTX *c
	&mov($T,&wparam(1));	# const void *input
	&mov($A,&wparam(2));	# size_t num
	&stack_push(16+3);	# allocate X[16]
	&shl($A,6);		# multiply by 64
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
	&mov($E,&DWP(16,$tmp1));# pre-load E
	&jmp(&label("loop"));

&set_label("loop",16);

	# copy input chunk to X, but reversing byte order!
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
	&mov(&wparam(1),$T);	# redundant in 1st spin

	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
	&mov($B,&DWP(4,$tmp1));
	&mov($C,&DWP(8,$tmp1));
	&mov($D,&DWP(12,$tmp1));

	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check

	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
	&mov($D,&wparam(1));	# D is last "T" and is discarded

	&add($E,&DWP(0,$tmp1));	# E is last "A"...
	&add($T,&DWP(4,$tmp1));
	&add($A,&DWP(8,$tmp1));
	&add($B,&DWP(12,$tmp1));
	&add($C,&DWP(16,$tmp1));

	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
	&add($D,64);		# advance input pointer
	&mov(&DWP(4,$tmp1),$T);
	&cmp($D,&wparam(2));	# have we reached the end yet?
	&mov(&DWP(8,$tmp1),$A);
	&mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
	&mov(&DWP(12,$tmp1),$B);
	&mov($T,$D);		# input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");

######################################################################
# Intel SHA Extensions implementation of SHA1 update function.
#
my ($ctx,$inp,$num)=("edi","esi","ecx");
my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
my @MSG=map("xmm$_",(4..7));

sub sha1rnds4 {
 my ($dst,$src,$imm)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
}
sub sha1op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha1nexte	{ sha1op38(0xc8,@_); }
sub sha1msg1	{ sha1op38(0xc9,@_); }
sub sha1msg2	{ sha1op38(0xca,@_); }
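
# The subs above hand-encode the SHA instruction bytes, so the module
# assembles even with tools that predate the mnemonics; e.g.
# &sha1msg1("xmm4","xmm5") emits 0x0f,0x38,0xc9,0xe5. In terms of the
# recurrence W[t]=ROTATE(W[t-3]^W[t-8]^W[t-14]^W[t-16],1): sha1msg1
# xors the W[t-16] and W[t-14] terms, the explicit pxor below adds
# W[t-8], and sha1msg2 folds in W[t-3] and performs the rotate;
# sha1nexte adds the rotated E state to the next four message dwords,
# and sha1rnds4 executes four rounds with the round-group constant
# selected by its immediate.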

&function_begin("_sha1_block_data_order_shaext");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");

	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("shaext_shortcut");
	&mov	($ctx,&wparam(0));

	&mov	($inp,&wparam(1));
	&mov	($num,&wparam(2));

	&movdqu	($ABCD,&QWP(0,$ctx));
	&movd	($E,&QWP(16,$ctx));

	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufd	($E,$E,0b00011011);		# flip word order
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$BSWAP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[1],$BSWAP);
	&pshufb	(@MSG[2],$BSWAP);
	&pshufb	(@MSG[3],$BSWAP);
	&jmp	(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&dec	($num);
	&lea	("eax",&DWP(0x40,$inp));
	&movdqa	(&QWP(0,"esp"),$E);	# offload $E
	&cmovne	($inp,"eax");
	&movdqa	(&QWP(16,"esp"),$ABCD);	# offload $ABCD

for($i=0;$i<20-4;$i+=2) {
	&sha1msg1	(@MSG[0],@MSG[1]);
	&movdqa		($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,int($i/5));	# 0-3...
	&sha1nexte	($E_,@MSG[1]);
	&pxor		(@MSG[0],@MSG[2]);
	&sha1msg1	(@MSG[1],@MSG[2]);
	&sha1msg2	(@MSG[0],@MSG[3]);

	&movdqa		($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,int(($i+1)/5));
	&sha1nexte	($E,@MSG[2]);
	&pxor		(@MSG[1],@MSG[3]);
	&sha1msg2	(@MSG[1],@MSG[0]);

	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
}

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 64-67
	&sha1nexte	($E_,@MSG[1]);
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufb	(@MSG[0],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 68-71
	&sha1nexte	($E,@MSG[2]);
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[1],$BSWAP);

	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 72-75
	&sha1nexte	($E_,@MSG[3]);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[2],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 76-79
	&movdqa	($E_,&QWP(0,"esp"));
	&pshufb	(@MSG[3],$BSWAP);
	&sha1nexte	($E,$E_);

	&paddd	($ABCD,&QWP(16,"esp"));
	&jnz	(&label("loop_shaext"));

	&pshufd	($ABCD,$ABCD,0b00011011);
	&pshufd	($E,$E,0b00011011);
	&movdqu	(&QWP(0,$ctx),$ABCD);
	&movd	(&DWP(16,$ctx),$E);

&function_end("_sha1_block_data_order_shaext");

######################################################################
# The SSSE3 implementation.
#
# %xmm[0-7] are used as a ring @X[] buffer containing quadruples of the
# last 32 elements of the message schedule or Xupdate outputs. The
# first 4 quadruples are simply byte-swapped input, the next 4 are
# calculated according to the method originally suggested by Dean
# Gaudet (modulo being implemented in SSSE3). Once 8 quadruples or 32
# elements are collected, it switches to the routine proposed by Max
# Locktyukhin.
#
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
# buffer on the stack. Keep in mind that X[2] is an alias of X[-6],
# X[3] of X[-5], and X[4] of X[-4]...
#
# Another notable optimization is aggressive stack frame compression
# aiming to minimize the number of 9-byte instructions...
#
# Yet another notable optimization is the "jumping" $B variable. It
# means that there is no register permanently allocated for the $B
# value. This made it possible to eliminate one instruction from
# body_20_39...
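#
# The aliasing works because @X indices are reduced modulo 8 with
# '&7', and Perl's bitwise '&' on a negative index wraps the same
# way: -6&7 == 2, -5&7 == 3, -4&7 == 4. A minimal illustrative
# self-check, not used by the generator:
sub __ring_alias_check {
    die "ring alias broken" unless ((-6&7)==2 && (-5&7)==3 && (-4&7)==4);
    return 1;
}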

my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };

&function_begin("_sha1_block_data_order_ssse3");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");

	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("ssse3_shortcut");

	&movdqa	(@X[3],&QWP(0,$tmp1));	# K_00_19
	&movdqa	(@X[4],&QWP(16,$tmp1));	# K_20_39
	&movdqa	(@X[5],&QWP(32,$tmp1));	# K_40_59
	&movdqa	(@X[6],&QWP(48,$tmp1));	# K_60_79
	&movdqa	(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&movdqa	(&QWP(112+16,"esp"),@X[5]);
	&movdqa	(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&movdqa	(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&movdqa	(&QWP(112+64,"esp"),@X[2]);

	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&movdqu	(@X[-3&7],&QWP(-48,$inp));
	&movdqu	(@X[-2&7],&QWP(-32,$inp));
	&movdqu	(@X[-1&7],&QWP(-16,$inp));
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&pshufb	(@X[-3&7],@X[2]);
	&pshufb	(@X[-2&7],@X[2]);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&pshufb	(@X[-1&7],@X[2]);
	&paddd	(@X[-4&7],@X[3]);		# add K_00_19
	&paddd	(@X[-3&7],@X[3]);
	&paddd	(@X[-2&7],@X[3]);
	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
	&psubd	(@X[-4&7],@X[3]);		# restore X[]
	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
	&psubd	(@X[-3&7],@X[3]);
	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
	&psubd	(@X[-2&7],@X[3]);

	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa (@X[0],@X[-3&7]);

	&jmp	(&label("loop"));

######################################################################
# The SSE instruction sequence is first broken into groups of
# independent instructions, independent with respect to their inputs
# and the shifter (not all architectures have more than one). Then
# IALU instructions are "knitted in" between the SSE groups. The
# distance is maintained for an SSE latency of 2 in the hope that it
# better suits the upcoming AMD Bulldozer [which allegedly also
# implements SSSE3]...
#
# Temporary register usage. X[2] is volatile at the entry and at the
# end is restored from the backtrace ring buffer. X[3] is expected to
# contain the current K_XX_XX constant and is used to calculate
# X[-1]+K from the previous round; it becomes volatile the moment the
# value is saved to the stack for transfer to the IALU. X[4] becomes
# volatile whenever X[-4] is accumulated and offloaded to the
# backtrace ring buffer; at the end it is loaded with the next
# K_XX_XX [which becomes X[3] in the next round]...

sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# ror
	  &punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	  &movdqa	(@X[2],@X[-1&7]);

	  &paddd	(@X[3],@X[-1&7]);
	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));		# rol

	  &psrldq	(@X[2],4);	# "X[-3]", 3 dwords

	  &pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));		# ror

	  &pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"

	  &pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));		# rol
	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU

	  &movdqa	(@X[4],@X[0]);

	 eval(shift(@insns));		# ror
	  &movdqa	(@X[2],@X[0]);

	  &pslldq	(@X[4],12);	# "X[0]"<<96, extract one dword
	  &paddd	(@X[0],@X[0]);

	 eval(shift(@insns));		# rol
	  &movdqa	(@X[3],@X[4]);

	 eval(shift(@insns));		# ror
	  &por	(@X[0],@X[2]);		# "X[0]"<<<=1

	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer

	 eval(shift(@insns));		# rol

	  &movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX

	  &pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
	  &pshufd	(@X[1],@X[-3&7],0xee)	if ($Xi<7);	# was &movdqa (@X[1],@X[-2&7])
	  &pshufd	(@X[3],@X[-1&7],0xee)	if ($Xi==7);

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++; push(@X,shift(@X));		# "rotate" X[]
}

sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# body_20_39
	  &pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	  &punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)

	 eval(shift(@insns));		# rol

	  &pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer

	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
	if ($Xi%5) {
	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	 eval(shift(@insns));		# ror
	  &paddd	(@X[3],@X[-1&7]);

	  &pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39

	 eval(shift(@insns));		# rol

	  &movdqa	(@X[2],@X[0]);
	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU

	 eval(shift(@insns));		# ror

	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	 eval(shift(@insns));		# body_20_39

	 eval(shift(@insns));		# rol

	 eval(shift(@insns));		# ror

	 eval(shift(@insns))		if (@insns[1] =~ /_rol/);
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	  &por	(@X[0],@X[2]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39

	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer

	 eval(shift(@insns));		# rol

	 eval(shift(@insns));		# ror
	  &pshufd	(@X[3],@X[-1],0xee)	if ($Xi<19);	# was &movdqa (@X[3],@X[0])

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++; push(@X,shift(@X));		# "rotate" X[]
}

sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	  &paddd	(@X[3],@X[-1&7]);

	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
	&movdqu	(@X[-3&7],&QWP(16,$inp));
	&movdqu	(@X[-2&7],&QWP(32,$inp));
	&movdqu	(@X[-1&7],&QWP(48,$inp));

	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	  &pshufb	(@X[($Xi-3)&7],@X[2]);

	  &paddd	(@X[($Xi-4)&7],@X[3]);

	  &movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU

	  &psubd	(@X[($Xi-4)&7],@X[3]);

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror	($b,$j?7:2);',		# $b>>>2

	'&mov	(@T[1],$a);',		# $b in next round

	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($b,$c);',		# $c^$d for next round

	'&and	(@T[1],$b);',		# ($b&($c^$d)) for next round

	'&xor	($b,$c);',		# restore $b
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_20_39 () {	# b^d^c
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d)	if($j==19);'.
	'&xor	(@T[0],$c)	if($j> 19);',	# ($b^$d^$c)
	'&mov	(@T[1],$a);',	# $b in next round

	'&xor	(@T[1],$c)	if ($j< 79);',	# $b^$d for next round

	'&$_ror	($b,7);',	# $b>>>2
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	return &body_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],$c)	if ($j>=40);',	# (b^c)&(c^d)
	'&xor	($c,$d)		if ($j>=40);',	# restore $c

	'&$_ror	($b,7);',	# $b>>>2
	'&mov	(@T[1],$a);',	# $b for next round

	'&xor	(@T[1],$c)	if ($j==59);'.
	'&xor	(@T[1],$b)	if ($j< 59);',	# b^c for next round

	'&xor	($b,$c)		if ($j< 59);',	# c^d for next round
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	($b,$b,2)			if ($j==0);'.	# $b>>>2
	'&rorx	($b,@T[1],7)			if ($j!=0);',	# $b>>>2
	'&lea	($e,&DWP(0,$e,@T[0]));',
	'&rorx	(@T[0],$a,5);',

	'&andn	(@T[1],$a,$c);',

	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer

	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_20_39 () {	# b^d^c
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&add	($e,($j==19?@T[0]:$b))',
	'&rorx	($b,@T[1],7);',	# $b>>>2
	'&rorx	(@T[0],$a,5);',

	'&xor	($a,$b)				if ($j<79);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))	if ($j<79);',	# X[]+K xfer
	'&xor	($a,$c)				if ($j<79);',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
	# on start $b=((b^c)&(c^d))^c
	return &bodyx_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	(@T[0],$a,5)',
	'&lea	($e,&DWP(0,$e,$b))',
	'&rorx	($b,@T[1],7)',	# $b>>>2
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer

	'&xor	($a,$b)',	# b^c for next round
	'&xor	(@T[1],$b)',	# c^d for next round

	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
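
# The body_* and bodyx_* subs above return each round as a list of
# instruction-generating Perl strings rather than emitting it at once;
# the Xupdate_* subs collect four rounds' worth and eval one string at
# a time between SSE instructions, knitting the two streams together.
# A minimal sketch of the mechanism, illustrative only [the SSE
# instructions stand for whichever group is being interleaved]:
#
#	my @insns = (&$body,&$body,&$body,&$body);	# 4 rounds' worth
#	&pxor	(@X[0],@X[-4&7]);	# an SSE group...
#	eval(shift(@insns));		# ...one IALU instruction...
#	&por	(@X[0],@X[2]);		# ...next SSE group...
#	eval(shift(@insns));		# ...and so on, then at the end
#	foreach (@insns) { eval; }	# flush whatever remains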

&set_label("loop",16);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa (@X[0],@X[-3&7]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

&function_end("_sha1_block_data_order_ssse3");

my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };
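# Note that with identical first and second operands shld/shrd compute
# the same function as rol/ror [see the commentary at the top]; on
# Sandy Bridge they decode as a single µ-op where rol/ror take two,
# which is the whole point of this code path.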

&function_begin("_sha1_block_data_order_avx");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");

	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("avx_shortcut");
	&vzeroall();

	&vmovdqa(@X[3],&QWP(0,$tmp1));	# K_00_19
	&vmovdqa(@X[4],&QWP(16,$tmp1));	# K_20_39
	&vmovdqa(@X[5],&QWP(32,$tmp1));	# K_40_59
	&vmovdqa(@X[6],&QWP(48,$tmp1));	# K_60_79
	&vmovdqa(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&vmovdqa(&QWP(112+64,"esp"),@X[2]);

	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
	&vpaddd	(@X[1],@X[-3&7],@X[3]);
	&vpaddd	(@X[2],@X[-2&7],@X[3]);
	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
	&vmovdqa(&QWP(0+32,"esp"),@X[2]);

	&jmp	(&label("loop"));

sub Xupdate_avx_16_31()			# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
	  &vmovdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vpxor	(@X[0],@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpxor	(@X[2],@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpxor	(@X[0],@X[0],@X[2]);	# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpsrld	(@X[2],@X[0],31);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpslldq(@X[4],@X[0],12);		# "X[0]"<<96, extract one dword
	  &vpaddd	(@X[0],@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpsrld	(@X[3],@X[4],30);
	  &vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpslld	(@X[4],@X[4],2);
	  &vmovdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vpxor	(@X[0],@X[0],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpxor	(@X[0],@X[0],@X[4]);	# "X[0]"^=("X[0]"<<96)<<<2
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vmovdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++; push(@X,shift(@X));		# "rotate" X[]
}

sub Xupdate_avx_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	  &vpalignr(@X[2],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	  &vpxor	(@X[0],@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	  &vpxor	(@X[0],@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	  &vmovdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	if ($Xi%5) {
	  &vmovdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &vmovdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	  &vpxor	(@X[0],@X[0],@X[2]);	# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	  &vpsrld	(@X[2],@X[0],30);
	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	  &vpslld	(@X[0],@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	  &vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	  &vmovdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++; push(@X,shift(@X));		# "rotate" X[]
}

sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
	&vmovdqu(@X[-3&7],&QWP(16,$inp));
	&vmovdqu(@X[-2&7],&QWP(32,$inp));
	&vmovdqu(@X[-1&7],&QWP(48,$inp));

	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vpshufb	(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &vmovdqa	(&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

&set_label("loop",16);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);		# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

	&vzeroall();

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
&function_end("_sha1_block_data_order_avx");

&set_label("K_XX_XX",64);
&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);

&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");