# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Multi-buffer SHA1 procedure processes n buffers in parallel by
# placing each buffer's data in a designated lane of a SIMD register.
# n is naturally limited to 4 on pre-AVX2 processors and to 8 on
# AVX2-capable processors such as Haswell.
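#
# In other words, after the gather/transpose step each 32-bit lane of an
# XMM (or YMM) register carries the same message word taken from a
# different input stream, and every SIMD instruction below then performs
# the scalar SHA-1 operation on all streams at once. A rough C model of
# the layout (illustrative only, these names are not used by the code):
#
#	typedef struct { unsigned int lane[4]; } X_WORD;  /* 8 lanes on AVX2 */
#
#	/* X[i].lane[l] == i-th big-endian word of stream l's current block */
#	X_WORD X[16];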
#                  this      +aesni(i)        sha1   aesni-sha1   gain(iv)
# -------------------------------------------------------------------
# Westmere(ii)     10.7/n    +1.28=3.96(n=4)  5.30   6.66         +68%
# Atom(ii)         18.9?/n   +3.93=8.66(n=4)  10.0   14.0         +62%
# Sandy Bridge     (8.16     +5.15=13.3)/n    4.99   5.98         +80%
# Ivy Bridge       (8.08     +5.14=13.2)/n    4.60   5.54         +68%
# Haswell(iii)     (8.96     +5.00=14.0)/n    3.57   4.55         +160%
# Bulldozer        (9.76     +5.76=15.5)/n    5.95   6.37         +64%
# (i)	multi-block CBC encrypt with 128-bit key;
# (ii)	(HASH+AES)/n does not apply to Westmere for n>3 and Atom,
#	because of lower AES-NI instruction throughput;
# (iii)	"this" is for n=8, when we gather twice as much data; the
#	result for n=4 is 8.00+4.44=12.4;
# (iv)	the presented improvement coefficients are asymptotic limits;
#	in real-life applications they are somewhat lower, e.g. for 2KB
#	fragments they range from 30% to 100% (on Haswell);
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	`ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
open OUT,"| \"$^X\" $xlate $flavour $output";
# void sha1_multi_block (
#     struct { unsigned int A[8];
#              unsigned int B[8];
#              unsigned int C[8];
#              unsigned int D[8];
#              unsigned int E[8]; } *ctx,
#     struct { void *ptr; int blocks; } inp[8],
#     int num);  /* 1 or 2 */
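#
# A hedged usage sketch; the type names and IV set-up below are
# illustrative assumptions (loosely modelled on the EVP multi-block
# caller), not definitions made by this module:
#
#	SHA1_MB_CTX ctx;                /* the A[8]..E[8] structure above */
#	HASH_DESC   inp[8];             /* one {ptr, blocks} per stream   */
#
#	for (int i = 0; i < 8; i++) {   /* seed every lane with the SHA-1 IV */
#	    ctx.A[i] = 0x67452301; ctx.B[i] = 0xefcdab89;
#	    ctx.C[i] = 0x98badcfe; ctx.D[i] = 0x10325476;
#	    ctx.E[i] = 0xc3d2e1f0;
#	    inp[i].ptr    = buf[i];
#	    inp[i].blocks = len[i] / 64; /* whole 64-byte blocks only */
#	}
#	sha1_multi_block(&ctx, inp, 2); /* num=2 -> up to 8 buffers,
#	                                   num=1 -> up to 4 */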
$ctx="%rdi";	# 1st arg
$inp="%rsi";	# 2nd arg
@ptr=map("%r$_",(8..11));
@V=($A,$B,$C,$D,$E)=map("%xmm$_",(0..4));
($t0,$t1,$t2,$t3,$tx)=map("%xmm$_",(5..9));
@Xi=map("%xmm$_",(10..14));
# Atom-specific optimization aiming to eliminate pshufb with high
# registers [and thus get rid of the accumulated 48-cycle penalty]
@Xi=map("%xmm$_",(0..4));
($tx,$t0,$t1,$t2,$t3)=map("%xmm$_",(5..9));
@V=($A,$B,$C,$D,$E)=map("%xmm$_",(10..14));
$off %= 16; $off *= $REG_SZ;
$off<256 ? "$off-128(%rax)" : "$off-256-128(%rbx)";
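# E.g. with $REG_SZ==16, &Xi_off(3) evaluates to "48-128(%rax)", i.e.
# X[3] lives at %rax-80; with $REG_SZ==32, &Xi_off(10) evaluates to
# "320-256-128(%rbx)". Indices wrap modulo 16, so the stack frame holds
# a 16-entry ring buffer of vectorized message words (this assumes %rax
# and %rbx are biased 128 bytes into their respective halves of the
# frame, cf. the "lea 256+128(%rsp),%rbx" in the AVX2 path below).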
my ($i,$a,$b,$c,$d,$e)=@_;
$code.=<<___ if ($i==0);
	movd (@ptr[0]),@Xi[0]
	lea `16*4`(@ptr[0]),@ptr[0]
	movd (@ptr[1]),@Xi[2] # borrow @Xi[2]
	lea `16*4`(@ptr[1]),@ptr[1]
	movd (@ptr[2]),@Xi[3] # borrow @Xi[3]
	lea `16*4`(@ptr[2]),@ptr[2]
	movd (@ptr[3]),@Xi[4] # borrow @Xi[4]
	lea `16*4`(@ptr[3]),@ptr[3]
	punpckldq @Xi[3],@Xi[0]
	movd `4*$j-16*4`(@ptr[0]),@Xi[1]
	punpckldq @Xi[4],@Xi[2]
	movd `4*$j-16*4`(@ptr[1]),$t3
	punpckldq @Xi[2],@Xi[0]
	movd `4*$j-16*4`(@ptr[2]),$t2
$code.=<<___ if ($i<14); # just load input
	movd `4*$j-16*4`(@ptr[3]),$t1
	paddd $K,$e # e+=K_00_19
	movdqa @Xi[0],`&Xi_off($i)`
	paddd @Xi[0],$e # e+=X[i]
	movd `4*$k-16*4`(@ptr[0]),@Xi[2]
	pxor $t1,$t0 # Ch(b,c,d)
	por $t3,$t2 # rol(a,5)
	movd `4*$k-16*4`(@ptr[1]),$t3
	paddd $t0,$e # e+=Ch(b,c,d)
	paddd $t2,$e # e+=rol(a,5)
	movd `4*$k-16*4`(@ptr[2]),$t2
	por $t1,$b # b=rol(b,30)
$code.=<<___ if ($i==14); # just load input
	movd `4*$j-16*4`(@ptr[3]),$t1
	paddd $K,$e # e+=K_00_19
	prefetcht0 63(@ptr[0])
	movdqa @Xi[0],`&Xi_off($i)`
	paddd @Xi[0],$e # e+=X[i]
	pxor $t1,$t0 # Ch(b,c,d)
	prefetcht0 63(@ptr[1])
	por $t3,$t2 # rol(a,5)
	paddd $t0,$e # e+=Ch(b,c,d)
	prefetcht0 63(@ptr[2])
	paddd $t2,$e # e+=rol(a,5)
	prefetcht0 63(@ptr[3])
	por $t1,$b # b=rol(b,30)
$code.=<<___ if ($i>=13 && $i<15);
	movdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
$code.=<<___ if ($i>=15); # apply Xupdate
	pxor @Xi[-2],@Xi[1] # "X[13]"
	movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
	pxor `&Xi_off($j+8)`,@Xi[1]
	paddd $K,$e # e+=K_00_19
	movdqa @Xi[0],`&Xi_off($i)`
	paddd @Xi[0],$e # e+=X[i]
	pxor $t1,$t0 # Ch(b,c,d)
	por $t3,$t2 # rol(a,5)
	paddd $t0,$e # e+=Ch(b,c,d)
	paddd $t2,$e # e+=rol(a,5)
	por $tx,@Xi[1] # rol \$1,@Xi[1]
	por $t1,$b # b=rol(b,30)
push(@Xi,shift(@Xi));
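# For reference, the scalar update each lane performs in rounds 0..19
# is (a sketch of SHA-1 itself, not code emitted by this module):
#
#	/* Ch(b,c,d) = (b & c) ^ (~b & d) */
#	e += ROL32(a,5) + Ch(b,c,d) + K_00_19 + X[i];
#	b  = ROL32(b,30);
#
# BODY_20_39 and BODY_40_59 below differ only in the boolean function
# (Parity(b,c,d) = b^c^d, Maj(b,c,d) = (b&c)|(b&d)|(c&d)) and in the
# round constant.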
my ($i,$a,$b,$c,$d,$e)=@_;
$code.=<<___ if ($i<79);
	pxor @Xi[-2],@Xi[1] # "X[13]"
	movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
	pxor `&Xi_off($j+8)`,@Xi[1]
	paddd $K,$e # e+=K_20_39
$code.=<<___ if ($i<72);
	movdqa @Xi[0],`&Xi_off($i)`
$code.=<<___ if ($i<79);
	paddd @Xi[0],$e # e+=X[i]
	pxor $c,$t0 # Parity(b,c,d)
	por $t3,$t2 # rol(a,5)
	paddd $t0,$e # e+=Parity(b,c,d)
	paddd $t2,$e # e+=rol(a,5)
	por $tx,@Xi[1] # rol(@Xi[1],1)
	por $t1,$b # b=rol(b,30)
$code.=<<___ if ($i==79);
	paddd $K,$e # e+=K_20_39
	paddd @Xi[0],$e # e+=X[i]
	pxor $c,$t0 # Parity(b,c,d)
	por $t3,$t2 # rol(a,5)
	paddd $t0,$e # e+=Parity(b,c,d)
	paddd $t2,$e # e+=rol(a,5)
	por $t1,$b # b=rol(b,30)
push(@Xi,shift(@Xi));
my ($i,$a,$b,$c,$d,$e)=@_;
	pxor @Xi[-2],@Xi[1] # "X[13]"
	movdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
	pxor `&Xi_off($j+8)`,@Xi[1]
	paddd $K,$e # e+=K_40_59
	movdqa @Xi[0],`&Xi_off($i)`
	paddd @Xi[0],$e # e+=X[i]
	por $t3,$t2 # rol(a,5)
	paddd $t0,$e # e+=Maj(b,d,c)
	paddd $t2,$e # e+=rol(a,5)
	por $tx,@Xi[1] # rol(@Xi[1],1)
	por $t1,$b # b=rol(b,30)
push(@Xi,shift(@Xi));
.extern OPENSSL_ia32cap_P
.globl sha1_multi_block
.type sha1_multi_block,\@function,3
$code.=<<___ if ($avx);
	mov OPENSSL_ia32cap_P+4(%rip),%rcx
$code.=<<___ if ($win64);
	movaps %xmm7,0x10(%rsp)
	movaps %xmm8,0x20(%rsp)
	movaps %xmm9,0x30(%rsp)
	movaps %xmm10,-0x78(%rax)
	movaps %xmm11,-0x68(%rax)
	movaps %xmm12,-0x58(%rax)
	movaps %xmm13,-0x48(%rax)
	movaps %xmm14,-0x38(%rax)
	movaps %xmm15,-0x28(%rax)
	sub \$`$REG_SZ*18`,%rsp
	mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
	lea K_XX_XX(%rip),$Tbl
	lea `$REG_SZ*16`(%rsp),%rbx
	mov $num,`$REG_SZ*17+8`(%rsp) # original $num
for($i=0;$i<4;$i++) {
	mov `16*$i+0`($inp),@ptr[$i] # input pointer
	mov `16*$i+8`($inp),%ecx # number of blocks
	cmovg %ecx,$num # find maximum
	mov %ecx,`4*$i`(%rbx) # initialize counters
	cmovle $Tbl,@ptr[$i] # cancel input
	movdqu 0x00($ctx),$A # load context
	movdqa 0x60($Tbl),$tx # pbswap_mask
	movdqa -0x20($Tbl),$K # K_00_19
for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
$code.="	movdqa 0x00($Tbl),$K\n";	# K_20_39
for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.="	movdqa 0x20($Tbl),$K\n";	# K_40_59
for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
$code.="	movdqa 0x40($Tbl),$K\n";	# K_60_79
for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
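# The tail that follows retires finished inputs without branching per
# lane. A hedged C model of the per-lane logic (names are illustrative,
# not from this module):
#
#	for (int l = 0; l < 4; l++) {              /* 8 lanes on AVX2 */
#	    unsigned int mask = blocks[l] ? ~0u : 0;
#	    for (int w = 0; w < 5; w++)            /* A..E */
#	        ctx[w][l] += abcde[w][l] & mask;   /* no-op for done lanes */
#	    if (blocks[l] <= 1) ptr[l] = k_table;  /* park exhausted stream */
#	    blocks[l] += mask;                     /* blocks-- where active */
#	}
#
# pcmpgtd builds the all-ones/all-zeros lane mask used for this, and
# cmovge repoints exhausted streams at the constant table so their
# loads stay harmless.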
	movdqa (%rbx),@Xi[0] # pull counters
	cmp 4*0(%rbx),%ecx # examine counters
	cmovge $Tbl,@ptr[0] # cancel input
	pcmpgtd $t2,@Xi[1] # mask value
	paddd @Xi[1],@Xi[0] # counters--
	movdqu 0x00($ctx),$t0
	movdqu 0x20($ctx),$t1
	movdqu 0x40($ctx),$t2
	movdqu 0x60($ctx),$t3
	movdqu 0x80($ctx),$tx
	movdqa @Xi[0],(%rbx) # save counters
	movdqa 0x60($Tbl),$tx # pbswap_mask
	movdqa -0x20($Tbl),$K # K_00_19
	mov `$REG_SZ*17+8`(%rsp),$num
	lea $REG_SZ($ctx),$ctx
	lea `16*$REG_SZ/4`($inp),$inp
	mov `$REG_SZ*17`(%rsp),%rax # original %rsp
$code.=<<___ if ($win64);
	movaps -0xb8(%rax),%xmm6
	movaps -0xa8(%rax),%xmm7
	movaps -0x98(%rax),%xmm8
	movaps -0x88(%rax),%xmm9
	movaps -0x78(%rax),%xmm10
	movaps -0x68(%rax),%xmm11
	movaps -0x58(%rax),%xmm12
	movaps -0x48(%rax),%xmm13
	movaps -0x38(%rax),%xmm14
	movaps -0x28(%rax),%xmm15
.size sha1_multi_block,.-sha1_multi_block
my ($i,$a,$b,$c,$d,$e)=@_;
my $vpack = $REG_SZ==16 ? "vpunpckldq" : "vinserti128";
my $ptr_n = $REG_SZ==16 ? @ptr[1] : @ptr[4];
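# With $REG_SZ==16 the AVX path gathers streams exactly like the SSSE3
# one, merging adjacent lanes with vpunpckldq; with $REG_SZ==32 streams
# 0-3 and 4-7 are first assembled into separate xmm halves and then
# glued together with vinserti128 into one ymm register.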
$code.=<<___ if ($i==0 && $REG_SZ==16);
	vmovd (@ptr[0]),@Xi[0]
	lea `16*4`(@ptr[0]),@ptr[0]
	vmovd (@ptr[1]),@Xi[2] # borrow Xi[2]
	lea `16*4`(@ptr[1]),@ptr[1]
	vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
	lea `16*4`(@ptr[2]),@ptr[2]
	vpinsrd \$1,(@ptr[3]),@Xi[2],@Xi[2]
	lea `16*4`(@ptr[3]),@ptr[3]
	vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
	vpunpckldq @Xi[2],@Xi[0],@Xi[0]
	vmovd `4*$j-16*4`($ptr_n),$t3
	vpshufb $tx,@Xi[0],@Xi[0]
$code.=<<___ if ($i<15 && $REG_SZ==16); # just load input
	vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
	vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t3,$t3
$code.=<<___ if ($i==0 && $REG_SZ==32);
	vmovd (@ptr[0]),@Xi[0]
	lea `16*4`(@ptr[0]),@ptr[0]
	vmovd (@ptr[4]),@Xi[2] # borrow Xi[2]
	lea `16*4`(@ptr[4]),@ptr[4]
	lea `16*4`(@ptr[1]),@ptr[1]
	lea `16*4`(@ptr[5]),@ptr[5]
	vpinsrd \$1,(@ptr[2]),@Xi[0],@Xi[0]
	lea `16*4`(@ptr[2]),@ptr[2]
	vpinsrd \$1,(@ptr[6]),@Xi[2],@Xi[2]
	lea `16*4`(@ptr[6]),@ptr[6]
	vpinsrd \$1,(@ptr[3]),$t2,$t2
	lea `16*4`(@ptr[3]),@ptr[3]
	vpunpckldq $t2,@Xi[0],@Xi[0]
	vpinsrd \$1,(@ptr[7]),$t1,$t1
	lea `16*4`(@ptr[7]),@ptr[7]
	vpunpckldq $t1,@Xi[2],@Xi[2]
	vmovd `4*$j-16*4`(@ptr[0]),@Xi[1]
	vinserti128 @Xi[2],@Xi[0],@Xi[0]
	vmovd `4*$j-16*4`($ptr_n),$t3
	vpshufb $tx,@Xi[0],@Xi[0]
$code.=<<___ if ($i<15 && $REG_SZ==32); # just load input
	vmovd `4*$j-16*4`(@ptr[1]),$t2
	vmovd `4*$j-16*4`(@ptr[5]),$t1
	vpinsrd \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
	vpinsrd \$1,`4*$j-16*4`(@ptr[6]),$t3,$t3
	vpinsrd \$1,`4*$j-16*4`(@ptr[3]),$t2,$t2
	vpunpckldq $t2,@Xi[1],@Xi[1]
	vpinsrd \$1,`4*$j-16*4`(@ptr[7]),$t1,$t1
	vpunpckldq $t1,$t3,$t3
$code.=<<___ if ($i<14);
	vpaddd $K,$e,$e # e+=K_00_19
	vmovdqa @Xi[0],`&Xi_off($i)`
	vpaddd @Xi[0],$e,$e # e+=X[i]
	$vpack $t3,@Xi[1],@Xi[1]
	vpxor $t1,$t0,$t0 # Ch(b,c,d)
	vmovd `4*$k-16*4`(@ptr[0]),@Xi[2]
	vpor $t3,$t2,$t2 # rol(a,5)
	vmovd `4*$k-16*4`($ptr_n),$t3
	vpaddd $t0,$e,$e # e+=Ch(b,c,d)
	vpaddd $t2,$e,$e # e+=rol(a,5)
	vpshufb $tx,@Xi[1],@Xi[1]
	vpor $t1,$b,$b # b=rol(b,30)
$code.=<<___ if ($i==14);
	vpaddd $K,$e,$e # e+=K_00_19
	prefetcht0 63(@ptr[0])
	vmovdqa @Xi[0],`&Xi_off($i)`
	vpaddd @Xi[0],$e,$e # e+=X[i]
	$vpack $t3,@Xi[1],@Xi[1]
	prefetcht0 63(@ptr[1])
	vpxor $t1,$t0,$t0 # Ch(b,c,d)
	vpor $t3,$t2,$t2 # rol(a,5)
	prefetcht0 63(@ptr[2])
	vpaddd $t0,$e,$e # e+=Ch(b,c,d)
	vpaddd $t2,$e,$e # e+=rol(a,5)
	prefetcht0 63(@ptr[3])
	vpshufb $tx,@Xi[1],@Xi[1]
	vpor $t1,$b,$b # b=rol(b,30)
$code.=<<___ if ($i>=13 && $i<15);
	vmovdqa `&Xi_off($j+2)`,@Xi[3] # preload "X[2]"
$code.=<<___ if ($i>=15); # apply Xupdate
	vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
	vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
	vpaddd $K,$e,$e # e+=K_00_19
	`"prefetcht0 63(@ptr[4])" if ($i==15 && $REG_SZ==32)`
	vmovdqa @Xi[0],`&Xi_off($i)`
	vpaddd @Xi[0],$e,$e # e+=X[i]
	vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
	vpxor $t1,$t0,$t0 # Ch(b,c,d)
	vpxor @Xi[3],@Xi[1],@Xi[1]
	`"prefetcht0 63(@ptr[5])" if ($i==15 && $REG_SZ==32)`
	vpor $t3,$t2,$t2 # rol(a,5)
	vpaddd $t0,$e,$e # e+=Ch(b,c,d)
	`"prefetcht0 63(@ptr[6])" if ($i==15 && $REG_SZ==32)`
	vpsrld \$31,@Xi[1],$tx
	vpaddd @Xi[1],@Xi[1],@Xi[1]
	`"prefetcht0 63(@ptr[7])" if ($i==15 && $REG_SZ==32)`
	vpaddd $t2,$e,$e # e+=rol(a,5)
	vpor $tx,@Xi[1],@Xi[1] # rol \$1,@Xi[1]
	vpor $t1,$b,$b # b=rol(b,30)
push(@Xi,shift(@Xi));
my ($i,$a,$b,$c,$d,$e)=@_;
$code.=<<___ if ($i<79);
	vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
	vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
	vpaddd $K,$e,$e # e+=K_20_39
$code.=<<___ if ($i<72);
	vmovdqa @Xi[0],`&Xi_off($i)`
$code.=<<___ if ($i<79);
	vpaddd @Xi[0],$e,$e # e+=X[i]
	vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
	vpxor $c,$t0,$t0 # Parity(b,c,d)
	vpxor @Xi[3],@Xi[1],@Xi[1]
	vpor $t3,$t2,$t2 # rol(a,5)
	vpaddd $t0,$e,$e # e+=Parity(b,c,d)
	vpsrld \$31,@Xi[1],$tx
	vpaddd @Xi[1],@Xi[1],@Xi[1]
	vpaddd $t2,$e,$e # e+=rol(a,5)
	vpor $tx,@Xi[1],@Xi[1] # rol(@Xi[1],1)
	vpor $t1,$b,$b # b=rol(b,30)
$code.=<<___ if ($i==79);
	vpaddd $K,$e,$e # e+=K_20_39
	vpaddd @Xi[0],$e,$e # e+=X[i]
	vpxor $c,$t0,$t0 # Parity(b,c,d)
	vpor $t3,$t2,$t2 # rol(a,5)
	vpaddd $t0,$e,$e # e+=Parity(b,c,d)
	vpaddd $t2,$e,$e # e+=rol(a,5)
	vpor $t1,$b,$b # b=rol(b,30)
push(@Xi,shift(@Xi));
my ($i,$a,$b,$c,$d,$e)=@_;
	vpxor @Xi[-2],@Xi[1],@Xi[1] # "X[13]"
	vmovdqa `&Xi_off($j+2)`,@Xi[3] # "X[2]"
	vpaddd $K,$e,$e # e+=K_40_59
	vpxor `&Xi_off($j+8)`,@Xi[1],@Xi[1]
	vpxor @Xi[3],@Xi[1],@Xi[1]
	vmovdqu @Xi[0],`&Xi_off($i)`
	vpaddd @Xi[0],$e,$e # e+=X[i]
	vpor $t3,$t2,$t2 # rol(a,5)
	vpsrld \$31,@Xi[1],$tx
	vpaddd @Xi[1],@Xi[1],@Xi[1]
	vpaddd $t0,$e,$e # e+=Maj(b,d,c)
	vpaddd $t2,$e,$e # e+=rol(a,5)
	vpor $tx,@Xi[1],@Xi[1] # rol(@Xi[1],1)
	vpor $t1,$b,$b # b=rol(b,30)
push(@Xi,shift(@Xi));
.type sha1_multi_block_avx,\@function,3
sha1_multi_block_avx:
$code.=<<___ if ($avx>1);
$code.=<<___ if ($win64);
	movaps %xmm7,0x10(%rsp)
	movaps %xmm8,0x20(%rsp)
	movaps %xmm9,0x30(%rsp)
	movaps %xmm10,-0x78(%rax)
	movaps %xmm11,-0x68(%rax)
	movaps %xmm12,-0x58(%rax)
	movaps %xmm13,-0x48(%rax)
	movaps %xmm14,-0x38(%rax)
	movaps %xmm15,-0x28(%rax)
	sub \$`$REG_SZ*18`,%rsp
	mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
	lea K_XX_XX(%rip),$Tbl
	lea `$REG_SZ*16`(%rsp),%rbx
	mov $num,`$REG_SZ*17+8`(%rsp) # original $num
for($i=0;$i<4;$i++) {
	mov `16*$i+0`($inp),@ptr[$i] # input pointer
	mov `16*$i+8`($inp),%ecx # number of blocks
	cmovg %ecx,$num # find maximum
	mov %ecx,`4*$i`(%rbx) # initialize counters
	cmovle $Tbl,@ptr[$i] # cancel input
	vmovdqu 0x00($ctx),$A # load context
	vmovdqu 0x20($ctx),$B
	vmovdqu 0x40($ctx),$C
	vmovdqu 0x60($ctx),$D
	vmovdqu 0x80($ctx),$E
	vmovdqu 0x60($Tbl),$tx # pbswap_mask
$code.="	vmovdqa -0x20($Tbl),$K\n";	# K_00_19
for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
$code.="	vmovdqa 0x00($Tbl),$K\n";	# K_20_39
for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
$code.="	vmovdqa 0x20($Tbl),$K\n";	# K_40_59
for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
$code.="	vmovdqa 0x40($Tbl),$K\n";	# K_60_79
for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
for($i=0;$i<4;$i++) {
	cmp `4*$i`(%rbx),%ecx # examine counters
	cmovge $Tbl,@ptr[$i] # cancel input
	vmovdqu (%rbx),$t0 # pull counters
	vpcmpgtd $t2,$t1,$t1 # mask value
	vpaddd $t1,$t0,$t0 # counters--
	vpaddd 0x00($ctx),$A,$A
	vpaddd 0x20($ctx),$B,$B
	vpaddd 0x40($ctx),$C,$C
	vpaddd 0x60($ctx),$D,$D
	vpaddd 0x80($ctx),$E,$E
	vmovdqu $A,0x00($ctx)
	vmovdqu $B,0x20($ctx)
	vmovdqu $C,0x40($ctx)
	vmovdqu $D,0x60($ctx)
	vmovdqu $E,0x80($ctx)
	vmovdqu $t0,(%rbx) # save counters
	vmovdqu 0x60($Tbl),$tx # pbswap_mask
	mov `$REG_SZ*17+8`(%rsp),$num
	lea $REG_SZ($ctx),$ctx
	lea `16*$REG_SZ/4`($inp),$inp
	mov `$REG_SZ*17`(%rsp),%rax # original %rsp
$code.=<<___ if ($win64);
	movaps -0xb8(%rax),%xmm6
	movaps -0xa8(%rax),%xmm7
	movaps -0x98(%rax),%xmm8
	movaps -0x88(%rax),%xmm9
	movaps -0x78(%rax),%xmm10
	movaps -0x68(%rax),%xmm11
	movaps -0x58(%rax),%xmm12
	movaps -0x48(%rax),%xmm13
	movaps -0x38(%rax),%xmm14
	movaps -0x28(%rax),%xmm15
.size sha1_multi_block_avx,.-sha1_multi_block_avx
$code =~ s/\`([^\`]*)\`/eval $1/gem;
@ptr=map("%r$_",(12..15,8..11));
@V=($A,$B,$C,$D,$E)=map("%ymm$_",(0..4));
($t0,$t1,$t2,$t3,$tx)=map("%ymm$_",(5..9));
@Xi=map("%ymm$_",(10..14));
.type sha1_multi_block_avx2,\@function,3
sha1_multi_block_avx2:
$code.=<<___ if ($win64);
	movaps %xmm7,0x10(%rsp)
	movaps %xmm8,0x20(%rsp)
	movaps %xmm9,0x30(%rsp)
	movaps %xmm10,0x40(%rsp)
	movaps %xmm11,0x50(%rsp)
	movaps %xmm12,-0x78(%rax)
	movaps %xmm13,-0x68(%rax)
	movaps %xmm14,-0x58(%rax)
	movaps %xmm15,-0x48(%rax)
	sub \$`$REG_SZ*18`,%rsp
	mov %rax,`$REG_SZ*17`(%rsp) # original %rsp
	lea K_XX_XX(%rip),$Tbl
	mov $num,`$REG_SZ*17+8`(%rsp) # original $num
	lea `$REG_SZ*16`(%rsp),%rbx
for($i=0;$i<8;$i++) {
	mov `16*$i+0`($inp),@ptr[$i] # input pointer
	mov `16*$i+8`($inp),%ecx # number of blocks
	cmovg %ecx,$num # find maximum
	mov %ecx,`4*$i`(%rbx) # initialize counters
	cmovle $Tbl,@ptr[$i] # cancel input
	vmovdqu 0x00($ctx),$A # load context
	vmovdqu 0x20($ctx),$B
	lea 256+128(%rsp),%rbx
	vmovdqu 0x40($ctx),$C
	vmovdqu 0x60($ctx),$D
	vmovdqu 0x80($ctx),$E
	vmovdqu 0x60($Tbl),$tx # pbswap_mask
$code.="	vmovdqa -0x20($Tbl),$K\n";	# K_00_19
for($i=0;$i<20;$i++) { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
$code.="	vmovdqa 0x00($Tbl),$K\n";	# K_20_39
for(;$i<40;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
$code.="	vmovdqa 0x20($Tbl),$K\n";	# K_40_59
for(;$i<60;$i++) { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
$code.="	vmovdqa 0x40($Tbl),$K\n";	# K_60_79
for(;$i<80;$i++) { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
	lea `$REG_SZ*16`(%rsp),%rbx
for($i=0;$i<8;$i++) {
	cmp `4*$i`(%rbx),%ecx # examine counters
	cmovge $Tbl,@ptr[$i] # cancel input
	vmovdqu (%rbx),$t0 # pull counters
	vpcmpgtd $t2,$t1,$t1 # mask value
	vpaddd $t1,$t0,$t0 # counters--
	vpaddd 0x00($ctx),$A,$A
	vpaddd 0x20($ctx),$B,$B
	vpaddd 0x40($ctx),$C,$C
	vpaddd 0x60($ctx),$D,$D
	vpaddd 0x80($ctx),$E,$E
	vmovdqu $A,0x00($ctx)
	vmovdqu $B,0x20($ctx)
	vmovdqu $C,0x40($ctx)
	vmovdqu $D,0x60($ctx)
	vmovdqu $E,0x80($ctx)
	vmovdqu $t0,(%rbx) # save counters
	lea 256+128(%rsp),%rbx
	vmovdqu 0x60($Tbl),$tx # pbswap_mask
	#mov `$REG_SZ*17+8`(%rsp),$num
	#lea $REG_SZ($ctx),$ctx
	#lea `16*$REG_SZ/4`($inp),$inp
	#jnz .Loop_grande_avx2
	mov `$REG_SZ*17`(%rsp),%rax # original %rsp
$code.=<<___ if ($win64);
	movaps -0xd8(%rax),%xmm6
	movaps -0xc8(%rax),%xmm7
	movaps -0xb8(%rax),%xmm8
	movaps -0xa8(%rax),%xmm9
	movaps -0x98(%rax),%xmm10
	movaps -0x88(%rax),%xmm11
	movaps -0x78(%rax),%xmm12
	movaps -0x68(%rax),%xmm13
	movaps -0x58(%rax),%xmm14
	movaps -0x48(%rax),%xmm15
.size sha1_multi_block_avx2,.-sha1_multi_block_avx2
.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
	s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
	s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+),%ymm([0-9]+)/$1$2%xmm$3,%xmm$4/go or
	s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
	s/\b(vinserti128)\b(\s+)%ymm/$1$2\$1,%xmm/go or
	s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
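	# e.g. "vmovd (%r12),%ymm10" is rewritten as "vmovd (%r12),%xmm10":
	# vmovd/vpinsrd/vpextrd/vinserti128 only accept %xmm operands, so the
	# %ymm names used uniformly by the AVX2 code above are demoted to
	# their legal %xmm form before the lines are printed.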