3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+128 bytes shared
# table]. The GHASH function also features a so-called "528B" variant
# utilizing an additional 256+16 bytes of per-key storage [+512 bytes
# shared table]. Performance results are for this streamed GHASH
# subroutine and are expressed in cycles per processed byte, less is
# better:
#               gcc 3.4.x(*)    assembler
#
# Opteron          19.3            7.7        +150%
# Core2            17.8            8.1(**)    +120%
# VIA Nano         21.8           10.1        +115%
# (*)  the comparison is not completely fair, because the C results
#      are for the vanilla "256B" implementation, while the assembler
#      results are for the "528B" variant;
31 # (**) it's mystery [to me] why Core2 result is not same as for
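#
# For reference, what all of the code paths below compute can be
# modelled in a few lines of plain Perl. This is only an illustrative
# sketch (it assumes Math::BigInt, the helper names gf128_mul/ghash_ref
# are ours, and nothing in this file calls them or emits them into the
# generated assembly); it follows the bit-reflected GF(2^128)
# convention of NIST SP 800-38D, where bit 0 of a field element is the
# most significant bit of its first byte.

use Math::BigInt;

my $gcm_poly = Math::BigInt->from_hex("0xE1")->blsft(120);	# reflected x^128+x^7+x^2+x+1

sub gf128_mul {				# Z = X*H in GCM's GF(2^128)
    my ($X,$H) = @_;			# 128-bit Math::BigInt values
    my $Z = Math::BigInt->bzero();
    my $V = $H->copy();
    for my $i (0..127) {
	$Z->bxor($V) if ($X->copy()->brsft(127-$i)->band(1)->is_one());
	my $carry = $V->copy()->band(1)->is_one();
	$V->brsft(1);
	$V->bxor($gcm_poly) if ($carry);
    }
    $Z;
}

sub ghash_ref {				# Xi = (...((Xi^I[0])*H ^ I[1])*H ...)*H
    my ($Xi,$H,@blocks) = @_;
    foreach my $blk (@blocks) {
	$Xi = gf128_mul($Xi->copy()->bxor($blk), $H);
    }
    $Xi;
}
#
# E.g. ghash_ref(Math::BigInt->bzero(), $H, @blocks) is the value the
# gcm_ghash_* routines below accumulate in Xi (modulo the byte-swapping
# details handled by .Lbswap_mask in the CLMUL/AVX paths).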
# Add a PCLMULQDQ version performing at 2.02 cycles per processed
# byte. See ghash-x86.pl for background information and details about
# the coding techniques.
#
# Special thanks to David Woodhouse <dwmw2@infradead.org> for
# providing access to a Westmere-based system on behalf of Intel
# Open Source Technology Centre.
# Overhaul: aggregate Karatsuba post-processing, improve ILP in
# reduction_alg9, increase reduction aggregate factor to 4x. As for
# the latter: ghash-x86.pl argues that it makes less sense to
# increase the aggregate factor. Then why increase it here? The
# critical path consists of 3 independent pclmulqdq instructions,
# Karatsuba post-processing and reduction. "On top" of this we lay
# down aggregated multiplication operations, triplets of independent
# pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
# less sense to aggregate more multiplications than it takes to
# perform the remaining non-multiplication operations. 2x is a
# near-optimal coefficient for contemporary Intel CPUs (hence the
# modest improvement), but not for Bulldozer. The latter is because
# its logical SIMD operations are twice as slow as on Intel, so the
# critical path is longer. A CPU with a higher pclmulqdq issue rate
# would also benefit from a higher aggregate factor...
# Sandy Bridge     1.80(+8%)
# Ivy Bridge       1.80(+7%)
# Haswell          0.55(+93%) (if system doesn't support AVX)
# Bulldozer        1.49(+27%)
# Silvermont       2.88(+13%)
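#
# The effect of aggregation can be expressed with the reference model
# above: instead of four serial multiplications by H, four blocks are
# folded using one multiplication by each of H^4..H^1, exactly as the
# "Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P"
# comment further down states. An illustrative sketch, reusing the
# gf128_mul() helper above (again, never called by this file):

sub ghash_aggregated4 {			# processes exactly four blocks
    my ($Xi,$H,@I) = @_;
    my $H2 = gf128_mul($H,$H);
    my $H3 = gf128_mul($H2,$H);
    my $H4 = gf128_mul($H3,$H);
    my $Z  = gf128_mul($Xi->copy()->bxor($I[0]), $H4);
    $Z->bxor(gf128_mul($I[1],$H3));
    $Z->bxor(gf128_mul($I[2],$H2));
    $Z->bxor(gf128_mul($I[3],$H));
    $Z;					# equals ghash_ref($Xi,$H,@I[0..3])
}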
# ... the 8x aggregate factor AVX code path uses the reduction
# algorithm suggested by Shay Gueron[1]. Even though contemporary
# AVX-capable CPUs such as Sandy and Ivy Bridge can execute it, the
# code performs sub-optimally compared to the above-mentioned version.
# But thanks to Ilya Albrekht and Max Locktyukhin of Intel Corp. we
# knew that it performs at 0.41 cycles per byte on a Haswell processor.
78 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
82 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
84 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
86 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
87 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
88 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
89 die "can't locate x86_64-xlate.pl";
91 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
92 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
93 $avx = ($1>=2.19) + ($1>=2.22);
96 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
97 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
98 $avx = ($1>=2.09) + ($1>=2.10);
101 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
102 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
103 $avx = ($1>=10) + ($1>=11);
106 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
107 $avx = ($2>=3.0) + ($2>3.0);
110 open OUT,"| \"$^X\" $xlate $flavour $output";
115 # common register layout
126 # per-function register layout
130 sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/ or
131 $r =~ s/%[er]([sd]i)/%\1l/ or
132 $r =~ s/%[er](bp)/%\1l/ or
133 $r =~ s/%(r[0-9]+)[d]?/%\1b/; $r; }
135 sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
139 $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
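#
# The thunk above lets the rest of this file write instructions as
# Perl calls with the destination listed first: e.g. &mov($Zlo,"8($Xi)")
# appends "\tmov\t8($Xi),$Zlo\n" to $code, i.e. operands come out in
# AT&T source,destination order, and a purely numeric last argument is
# turned into an immediate by prefixing it with "$".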
150 mov `&LB("$Zlo")`,`&LB("$nlo")`
151 mov `&LB("$Zlo")`,`&LB("$nhi")`
152 shl \$4,`&LB("$nlo")`
154 mov 8($Htbl,$nlo),$Zlo
155 mov ($Htbl,$nlo),$Zhi
156 and \$0xf0,`&LB("$nhi")`
165 mov ($inp,$cnt),`&LB("$nlo")`
167 xor 8($Htbl,$nhi),$Zlo
169 xor ($Htbl,$nhi),$Zhi
170 mov `&LB("$nlo")`,`&LB("$nhi")`
171 xor ($rem_4bit,$rem,8),$Zhi
173 shl \$4,`&LB("$nlo")`
182 xor 8($Htbl,$nlo),$Zlo
184 xor ($Htbl,$nlo),$Zhi
185 and \$0xf0,`&LB("$nhi")`
186 xor ($rem_4bit,$rem,8),$Zhi
197 xor 8($Htbl,$nlo),$Zlo
199 xor ($Htbl,$nlo),$Zhi
200 and \$0xf0,`&LB("$nhi")`
201 xor ($rem_4bit,$rem,8),$Zhi
209 xor 8($Htbl,$nhi),$Zlo
211 xor ($Htbl,$nhi),$Zhi
213 xor ($rem_4bit,$rem,8),$Zhi
222 .extern OPENSSL_ia32cap_P
224 .globl gcm_gmult_4bit
225 .type gcm_gmult_4bit,\@function,2
229 push %rbp # %rbp and %r12 are pushed exclusively in
230 push %r12 # order to reuse Win64 exception handler...
234 lea .Lrem_4bit(%rip),$rem_4bit
245 .size gcm_gmult_4bit,.-gcm_gmult_4bit
248 # per-function register layout
254 .globl gcm_ghash_4bit
255 .type gcm_ghash_4bit,\@function,4
266 mov $inp,%r14 # reassign couple of args
272 my @nhi=("%ebx","%ecx");
273 my @rem=("%r12","%r13");
276 &sub ($Htbl,-128); # size optimization
277 &lea ($Hshr4,"16+128(%rsp)");
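# (The software-pipelined loop below reshapes the per-key table for the
# "528B" variant: each 128-bit Htable entry is stored shifted right by
# 4 bits in the Hshr4 table on the stack, and the four bits shifted out
# of each entry are collected in a 16-byte table at (%rsp), so that the
# main loop can consume input a byte at a time with the help of the
# .Lrem_8bit reduction table. 256+256+16 = 528 bytes of per-key data,
# hence the name.)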
278 { my @lo =($nlo,$nhi);
282 for ($i=0,$j=-2;$i<18;$i++,$j++) {
283 &mov ("$j(%rsp)",&LB($dat)) if ($i>1);
284 &or ($lo[0],$tmp) if ($i>1);
285 &mov (&LB($dat),&LB($lo[1])) if ($i>0 && $i<17);
286 &shr ($lo[1],4) if ($i>0 && $i<17);
287 &mov ($tmp,$hi[1]) if ($i>0 && $i<17);
288 &shr ($hi[1],4) if ($i>0 && $i<17);
289 &mov ("8*$j($Hshr4)",$hi[0]) if ($i>1);
290 &mov ($hi[0],"16*$i+0-128($Htbl)") if ($i<16);
291 &shl (&LB($dat),4) if ($i>0 && $i<17);
292 &mov ("8*$j-128($Hshr4)",$lo[0]) if ($i>1);
293 &mov ($lo[0],"16*$i+8-128($Htbl)") if ($i<16);
294 &shl ($tmp,60) if ($i>0 && $i<17);
296 push (@lo,shift(@lo));
297 push (@hi,shift(@hi));
301 &mov ($Zlo,"8($Xi)");
302 &mov ($Zhi,"0($Xi)");
303 &add ($len,$inp); # pointer to the end of data
304 &lea ($rem_8bit,".Lrem_8bit(%rip)");
305 &jmp (".Louter_loop");
307 $code.=".align 16\n.Louter_loop:\n";
308 &xor ($Zhi,"($inp)");
309 &mov ("%rdx","8($inp)");
310 &lea ($inp,"16($inp)");
313 &mov ("8($Xi)","%rdx");
318 &mov (&LB($nlo),&LB($dat));
319 &movz ($nhi[0],&LB($dat));
323 for ($j=11,$i=0;$i<15;$i++) {
325 &xor ($Zlo,"8($Htbl,$nlo)") if ($i>0);
326 &xor ($Zhi,"($Htbl,$nlo)") if ($i>0);
327 &mov ($Zlo,"8($Htbl,$nlo)") if ($i==0);
328 &mov ($Zhi,"($Htbl,$nlo)") if ($i==0);
330 &mov (&LB($nlo),&LB($dat));
331 &xor ($Zlo,$tmp) if ($i>0);
332 &movzw ($rem[1],"($rem_8bit,$rem[1],2)") if ($i>0);
334 &movz ($nhi[1],&LB($dat));
336 &movzb ($rem[0],"(%rsp,$nhi[0])");
338 &shr ($nhi[1],4) if ($i<14);
339 &and ($nhi[1],0xf0) if ($i==14);
340 &shl ($rem[1],48) if ($i>0);
344 &xor ($Zhi,$rem[1]) if ($i>0);
347 &movz ($rem[0],&LB($rem[0]));
348 &mov ($dat,"$j($Xi)") if (--$j%4==0);
351 &xor ($Zlo,"-128($Hshr4,$nhi[0],8)");
353 &xor ($Zhi,"($Hshr4,$nhi[0],8)");
355 unshift (@nhi,pop(@nhi)); # "rotate" registers
356 unshift (@rem,pop(@rem));
358 &movzw ($rem[1],"($rem_8bit,$rem[1],2)");
359 &xor ($Zlo,"8($Htbl,$nlo)");
360 &xor ($Zhi,"($Htbl,$nlo)");
366 &movz ($rem[0],&LB($Zlo));
370 &shl (&LB($rem[0]),4);
373 &xor ($Zlo,"8($Htbl,$nhi[0])");
374 &movzw ($rem[0],"($rem_8bit,$rem[0],2)");
377 &xor ($Zhi,"($Htbl,$nhi[0])");
386 &jb (".Louter_loop");
402 .size gcm_ghash_4bit,.-gcm_ghash_4bit
405 ######################################################################
408 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") : # Win64 order
409 ("%rdi","%rsi","%rdx","%rcx"); # Unix order
411 ($Xi,$Xhi)=("%xmm0","%xmm1"); $Hkey="%xmm2";
412 ($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
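# clmul64x64_T2 below computes a 128x128->256-bit carry-less product
# with only three pclmulqdq instructions by applying the Karatsuba
# identity to the 64-bit halves:
#
#   X*H = Xhi*Hhi<<128 ^ Xlo*Hlo
#         ^ ((Xhi^Xlo)*(Hhi^Hlo) ^ Xhi*Hhi ^ Xlo*Hlo)<<64
#
# In the same illustrative pure-Perl style as the header sketch (again
# assuming Math::BigInt; these helpers are ours and never called here):

sub clmul64_ref {			# carry-less 64x64->128 multiply
    my ($a,$b) = @_;			# 64-bit Math::BigInt values
    my $r = Math::BigInt->bzero();
    for my $i (0..63) {
	$r->bxor($b->copy()->blsft($i))
	    if ($a->copy()->brsft($i)->band(1)->is_one());
    }
    $r;
}

sub clmul128_karatsuba_ref {		# what the three pclmulqdq compute
    my ($Xhi,$Xlo,$Hhi,$Hlo) = @_;
    my $hi  = clmul64_ref($Xhi,$Hhi);			# imm 0x11 product
    my $lo  = clmul64_ref($Xlo,$Hlo);			# imm 0x00 product
    my $mid = clmul64_ref($Xhi->copy()->bxor($Xlo),	# third product on
			  $Hhi->copy()->bxor($Hlo));	# the pre-xored halves
    $mid->bxor($hi)->bxor($lo);		# Karatsuba post-processing
    $hi->blsft(128)->bxor($mid->blsft(64))->bxor($lo);
}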
414 sub clmul64x64_T2 { # minimal register pressure
415 my ($Xhi,$Xi,$Hkey,$HK)=@_;
417 if (!defined($HK)) { $HK = $T2;
420 pshufd \$0b01001110,$Xi,$T1
421 pshufd \$0b01001110,$Hkey,$T2
428 pshufd \$0b01001110,$Xi,$T1
433 pclmulqdq \$0x00,$Hkey,$Xi #######
434 pclmulqdq \$0x11,$Hkey,$Xhi #######
435 pclmulqdq \$0x00,$HK,$T1 #######
447 sub reduction_alg9 { # 17/11 times faster than Intel version
477 { my ($Htbl,$Xip)=@_4args;
481 .globl gcm_init_clmul
482 .type gcm_init_clmul,\@abi-omnipotent
487 $code.=<<___ if ($win64);
488 .LSEH_begin_gcm_init_clmul:
489 # I can't trust assembler to use specific encoding:-(
490 .byte 0x48,0x83,0xec,0x18 #sub $0x18,%rsp
491 .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp)
495 pshufd \$0b01001110,$Hkey,$Hkey # dword swap
498 pshufd \$0b11111111,$Hkey,$T2 # broadcast uppermost dword
503 pcmpgtd $T2,$T3 # broadcast carry bit
505 por $T1,$Hkey # H<<=1
508 pand .L0x1c2_polynomial(%rip),$T3
509 pxor $T3,$Hkey # if(carry) H^=0x1c2_polynomial
512 pshufd \$0b01001110,$Hkey,$HK
516 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK);
517 &reduction_alg9 ($Xhi,$Xi);
519 pshufd \$0b01001110,$Hkey,$T1
520 pshufd \$0b01001110,$Xi,$T2
521 pxor $Hkey,$T1 # Karatsuba pre-processing
522 movdqu $Hkey,0x00($Htbl) # save H
523 pxor $Xi,$T2 # Karatsuba pre-processing
524 movdqu $Xi,0x10($Htbl) # save H^2
525 palignr \$8,$T1,$T2 # low part is H.lo^H.hi...
526 movdqu $T2,0x20($Htbl) # save Karatsuba "salt"
529 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK); # H^3
530 &reduction_alg9 ($Xhi,$Xi);
534 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK); # H^4
535 &reduction_alg9 ($Xhi,$Xi);
537 pshufd \$0b01001110,$T3,$T1
538 pshufd \$0b01001110,$Xi,$T2
539 pxor $T3,$T1 # Karatsuba pre-processing
540 movdqu $T3,0x30($Htbl) # save H^3
541 pxor $Xi,$T2 # Karatsuba pre-processing
542 movdqu $Xi,0x40($Htbl) # save H^4
543 palignr \$8,$T1,$T2 # low part is H^3.lo^H^3.hi...
544 movdqu $T2,0x50($Htbl) # save Karatsuba "salt"
547 $code.=<<___ if ($win64);
550 .LSEH_end_gcm_init_clmul:
554 .size gcm_init_clmul,.-gcm_init_clmul
558 { my ($Xip,$Htbl)=@_4args;
561 .globl gcm_gmult_clmul
562 .type gcm_gmult_clmul,\@abi-omnipotent
567 movdqa .Lbswap_mask(%rip),$T3
569 movdqu 0x20($Htbl),$T2
572 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$T2);
573 $code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
# experimental alternative. special thing about it is that there is
# no dependency between the two multiplications...
577 mov \$0xA040608020C0E000,%r10 # ((7..0)·0xE0)&0xff
581 movq %r11,$T3 # borrow $T3
583 pshufb $T3,$T2 # ($Xi&7)·0xE0
585 pclmulqdq \$0x00,$Xi,$T1 # ·(0xE1<<1)
588 paddd $T2,$T2 # <<(64+56+1)
590 pclmulqdq \$0x01,$T3,$Xi
591 movdqa .Lbswap_mask(%rip),$T3 # reload $T3
601 .size gcm_gmult_clmul,.-gcm_gmult_clmul
605 { my ($Xip,$Htbl,$inp,$len)=@_4args;
606 my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
607 my ($T1,$T2,$T3)=map("%xmm$_",(8..10));
610 .globl gcm_ghash_clmul
611 .type gcm_ghash_clmul,\@abi-omnipotent
616 $code.=<<___ if ($win64);
618 .LSEH_begin_gcm_ghash_clmul:
619 # I can't trust assembler to use specific encoding:-(
620 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax),%rsp
621 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6,-0x20(%rax)
622 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7,-0x10(%rax)
623 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8,0(%rax)
624 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9,0x10(%rax)
625 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10,0x20(%rax)
626 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11,0x30(%rax)
627 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12,0x40(%rax)
628 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13,0x50(%rax)
629 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14,0x60(%rax)
630 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15,0x70(%rax)
633 movdqa .Lbswap_mask(%rip),$T3
637 movdqu 0x20($Htbl),$HK
643 movdqu 0x10($Htbl),$Hkey2
646 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
649 mov OPENSSL_ia32cap_P+4(%rip),%eax
653 and \$`1<<26|1<<22`,%eax # isolate MOVBE+XSAVE
654 cmp \$`1<<22`,%eax # check for MOVBE without XSAVE
658 mov \$0xA040608020C0E000,%rax # ((7..0)·0xE0)&0xff
659 movdqu 0x30($Htbl),$Hkey3
660 movdqu 0x40($Htbl),$Hkey4
663 # Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
665 movdqu 0x30($inp),$Xln
666 movdqu 0x20($inp),$Xl
670 pshufd \$0b01001110,$Xln,$Xmn
672 pclmulqdq \$0x00,$Hkey,$Xln
673 pclmulqdq \$0x11,$Hkey,$Xhn
674 pclmulqdq \$0x00,$HK,$Xmn
677 pshufd \$0b01001110,$Xl,$Xm
679 pclmulqdq \$0x00,$Hkey2,$Xl
680 pclmulqdq \$0x11,$Hkey2,$Xh
681 pclmulqdq \$0x10,$HK,$Xm
684 movups 0x50($Htbl),$HK
687 movdqu 0x10($inp),$Xl
692 pshufd \$0b01001110,$Xl,$Xm
695 pclmulqdq \$0x00,$Hkey3,$Xl
697 pshufd \$0b01001110,$Xi,$T1
699 pclmulqdq \$0x11,$Hkey3,$Xh
700 pclmulqdq \$0x00,$HK,$Xm
711 pclmulqdq \$0x00,$Hkey4,$Xi
713 movdqu 0x30($inp),$Xl
715 pclmulqdq \$0x11,$Hkey4,$Xhi
717 movdqu 0x20($inp),$Xln
719 pclmulqdq \$0x10,$HK,$T1
720 pshufd \$0b01001110,$Xl,$Xm
724 movups 0x20($Htbl),$HK
726 pclmulqdq \$0x00,$Hkey,$Xl
727 pshufd \$0b01001110,$Xln,$Xmn
729 pxor $Xi,$T1 # aggregated Karatsuba post-processing
734 pclmulqdq \$0x11,$Hkey,$Xh
738 movdqa .L7_mask(%rip),$T1
742 pand $Xi,$T1 # 1st phase
745 pclmulqdq \$0x00,$HK,$Xm
749 pclmulqdq \$0x00,$Hkey2,$Xln
755 movdqa $Xi,$T2 # 2nd phase
757 pclmulqdq \$0x11,$Hkey2,$Xhn
759 movdqu 0x10($inp),$Xl
761 pclmulqdq \$0x10,$HK,$Xmn
763 movups 0x50($Htbl),$HK
771 pshufd \$0b01001110,$Xl,$Xm
775 pclmulqdq \$0x00,$Hkey3,$Xl
779 pclmulqdq \$0x11,$Hkey3,$Xh
781 pshufd \$0b01001110,$Xi,$T1
784 pclmulqdq \$0x00,$HK,$Xm
792 pclmulqdq \$0x00,$Hkey4,$Xi
793 pclmulqdq \$0x11,$Hkey4,$Xhi
794 pclmulqdq \$0x10,$HK,$T1
798 pxor $Xi,$Xhi # aggregated Karatsuba post-processing
810 &reduction_alg9($Xhi,$Xi);
814 movdqu 0x20($Htbl),$HK
822 # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
823 # [(H*Ii+1) + (H*Xi+1)] mod P =
824 # [(H*Ii+1) + H^2*(Ii+Xi)] mod P
826 movdqu ($inp),$T1 # Ii
827 movdqu 16($inp),$Xln # Ii+1
833 pshufd \$0b01001110,$Xln,$Xmn
835 pclmulqdq \$0x00,$Hkey,$Xln
836 pclmulqdq \$0x11,$Hkey,$Xhn
837 pclmulqdq \$0x00,$HK,$Xmn
839 lea 32($inp),$inp # i+=2
850 pshufd \$0b01001110,$Xi,$Xmn #
853 pclmulqdq \$0x00,$Hkey2,$Xi
854 pclmulqdq \$0x11,$Hkey2,$Xhi
855 pclmulqdq \$0x10,$HK,$Xmn
857 pxor $Xln,$Xi # (H*Ii+1) + H^2*(Ii+Xi)
859 movdqu ($inp),$T2 # Ii
860 pxor $Xi,$T1 # aggregated Karatsuba post-processing
862 movdqu 16($inp),$Xln # Ii+1
865 pxor $T2,$Xhi # "Ii+Xi", consume early
876 movdqa $Xi,$T2 # 1st phase
880 pclmulqdq \$0x00,$Hkey,$Xln #######
888 pshufd \$0b01001110,$Xhn,$Xmn
892 movdqa $Xi,$T2 # 2nd phase
894 pclmulqdq \$0x11,$Hkey,$Xhn #######
901 pclmulqdq \$0x00,$HK,$Xmn #######
910 pshufd \$0b01001110,$Xi,$Xmn #
913 pclmulqdq \$0x00,$Hkey2,$Xi
914 pclmulqdq \$0x11,$Hkey2,$Xhi
915 pclmulqdq \$0x10,$HK,$Xmn
917 pxor $Xln,$Xi # (H*Ii+1) + H^2*(Ii+Xi)
928 &reduction_alg9 ($Xhi,$Xi);
934 movdqu ($inp),$T1 # Ii
938 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK); # H*(Ii+Xi)
939 &reduction_alg9 ($Xhi,$Xi);
945 $code.=<<___ if ($win64);
947 movaps 0x10(%rsp),%xmm7
948 movaps 0x20(%rsp),%xmm8
949 movaps 0x30(%rsp),%xmm9
950 movaps 0x40(%rsp),%xmm10
951 movaps 0x50(%rsp),%xmm11
952 movaps 0x60(%rsp),%xmm12
953 movaps 0x70(%rsp),%xmm13
954 movaps 0x80(%rsp),%xmm14
955 movaps 0x90(%rsp),%xmm15
957 .LSEH_end_gcm_ghash_clmul:
961 .size gcm_ghash_clmul,.-gcm_ghash_clmul
967 .type gcm_init_avx,\@abi-omnipotent
972 my ($Htbl,$Xip)=@_4args;
975 $code.=<<___ if ($win64);
976 .LSEH_begin_gcm_init_avx:
977 # I can't trust assembler to use specific encoding:-(
978 .byte 0x48,0x83,0xec,0x18 #sub $0x18,%rsp
979 .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp)
985 vpshufd \$0b01001110,$Hkey,$Hkey # dword swap
988 vpshufd \$0b11111111,$Hkey,$T2 # broadcast uppermost dword
989 vpsrlq \$63,$Hkey,$T1
990 vpsllq \$1,$Hkey,$Hkey
992 vpcmpgtd $T2,$T3,$T3 # broadcast carry bit
994 vpor $T1,$Hkey,$Hkey # H<<=1
997 vpand .L0x1c2_polynomial(%rip),$T3,$T3
998 vpxor $T3,$Hkey,$Hkey # if(carry) H^=0x1c2_polynomial
1000 vpunpckhqdq $Hkey,$Hkey,$HK
1003 mov \$4,%r10 # up to H^8
1004 jmp .Linit_start_avx
1007 sub clmul64x64_avx {
1008 my ($Xhi,$Xi,$Hkey,$HK)=@_;
1010 if (!defined($HK)) { $HK = $T2;
1012 vpunpckhqdq $Xi,$Xi,$T1
1013 vpunpckhqdq $Hkey,$Hkey,$T2
1019 vpunpckhqdq $Xi,$Xi,$T1
1024 vpclmulqdq \$0x11,$Hkey,$Xi,$Xhi #######
1025 vpclmulqdq \$0x00,$Hkey,$Xi,$Xi #######
1026 vpclmulqdq \$0x00,$HK,$T1,$T1 #######
1027 vpxor $Xi,$Xhi,$T2 #
1030 vpslldq \$8,$T1,$T2 #
1041 vpsllq \$57,$Xi,$T1 # 1st phase
1046 vpslldq \$8,$T2,$T1 #
1051 vpsrlq \$1,$Xi,$T2 # 2nd phase
1056 vpsrlq \$1,$Xi,$Xi #
1057 vpxor $Xhi,$Xi,$Xi #
1064 vpalignr \$8,$T1,$T2,$T3 # low part is H.lo^H.hi...
1065 vmovdqu $T3,-0x10($Htbl) # save Karatsuba "salt"
1067 &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK); # calculate H^3,5,7
1068 &reduction_avx ($Xhi,$Xi);
1073 &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK); # calculate H^2,4,6,8
1074 &reduction_avx ($Xhi,$Xi);
1076 vpshufd \$0b01001110,$T3,$T1
1077 vpshufd \$0b01001110,$Xi,$T2
1078 vpxor $T3,$T1,$T1 # Karatsuba pre-processing
1079 vmovdqu $T3,0x00($Htbl) # save H^1,3,5,7
1080 vpxor $Xi,$T2,$T2 # Karatsuba pre-processing
1081 vmovdqu $Xi,0x10($Htbl) # save H^2,4,6,8
1082 lea 0x30($Htbl),$Htbl
1086 vpalignr \$8,$T2,$T1,$T3 # last "salt" is flipped
1087 vmovdqu $T3,-0x10($Htbl)
1091 $code.=<<___ if ($win64);
1094 .LSEH_end_gcm_init_avx:
1098 .size gcm_init_avx,.-gcm_init_avx
1103 .size gcm_init_avx,.-gcm_init_avx
1108 .globl gcm_gmult_avx
1109 .type gcm_gmult_avx,\@abi-omnipotent
1113 .size gcm_gmult_avx,.-gcm_gmult_avx
1117 .globl gcm_ghash_avx
1118 .type gcm_ghash_avx,\@abi-omnipotent
1123 my ($Xip,$Htbl,$inp,$len)=@_4args;
1127 $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));
1129 $code.=<<___ if ($win64);
1130 lea -0x88(%rsp),%rax
1131 .LSEH_begin_gcm_ghash_avx:
1132 # I can't trust assembler to use specific encoding:-(
1133 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax),%rsp
1134 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6,-0x20(%rax)
1135 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7,-0x10(%rax)
1136 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8,0(%rax)
1137 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9,0x10(%rax)
1138 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10,0x20(%rax)
1139 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11,0x30(%rax)
1140 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12,0x40(%rax)
1141 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13,0x50(%rax)
1142 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14,0x60(%rax)
1143 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15,0x70(%rax)
1148 vmovdqu ($Xip),$Xi # load $Xi
1149 lea .L0x1c2_polynomial(%rip),%r10
1150 lea 0x40($Htbl),$Htbl # size optimization
1151 vmovdqu .Lbswap_mask(%rip),$bswap
1152 vpshufb $bswap,$Xi,$Xi
1157 vmovdqu 0x70($inp),$Ii # I[7]
1158 vmovdqu 0x00-0x40($Htbl),$Hkey # $Hkey^1
1159 vpshufb $bswap,$Ii,$Ii
1160 vmovdqu 0x20-0x40($Htbl),$HK
1162 vpunpckhqdq $Ii,$Ii,$T2
1163 vmovdqu 0x60($inp),$Ij # I[6]
1164 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1166 vpshufb $bswap,$Ij,$Ij
1167 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1168 vmovdqu 0x10-0x40($Htbl),$Hkey # $Hkey^2
1169 vpunpckhqdq $Ij,$Ij,$T1
1170 vmovdqu 0x50($inp),$Ii # I[5]
1171 vpclmulqdq \$0x00,$HK,$T2,$Xmi
1174 vpshufb $bswap,$Ii,$Ii
1175 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo
1176 vpunpckhqdq $Ii,$Ii,$T2
1177 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi
1178 vmovdqu 0x30-0x40($Htbl),$Hkey # $Hkey^3
1180 vmovdqu 0x40($inp),$Ij # I[4]
1181 vpclmulqdq \$0x10,$HK,$T1,$Zmi
1182 vmovdqu 0x50-0x40($Htbl),$HK
1184 vpshufb $bswap,$Ij,$Ij
1185 vpxor $Xlo,$Zlo,$Zlo
1186 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1187 vpxor $Xhi,$Zhi,$Zhi
1188 vpunpckhqdq $Ij,$Ij,$T1
1189 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1190 vmovdqu 0x40-0x40($Htbl),$Hkey # $Hkey^4
1191 vpxor $Xmi,$Zmi,$Zmi
1192 vpclmulqdq \$0x00,$HK,$T2,$Xmi
1195 vmovdqu 0x30($inp),$Ii # I[3]
1196 vpxor $Zlo,$Xlo,$Xlo
1197 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo
1198 vpxor $Zhi,$Xhi,$Xhi
1199 vpshufb $bswap,$Ii,$Ii
1200 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi
1201 vmovdqu 0x60-0x40($Htbl),$Hkey # $Hkey^5
1202 vpxor $Zmi,$Xmi,$Xmi
1203 vpunpckhqdq $Ii,$Ii,$T2
1204 vpclmulqdq \$0x10,$HK,$T1,$Zmi
1205 vmovdqu 0x80-0x40($Htbl),$HK
1208 vmovdqu 0x20($inp),$Ij # I[2]
1209 vpxor $Xlo,$Zlo,$Zlo
1210 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1211 vpxor $Xhi,$Zhi,$Zhi
1212 vpshufb $bswap,$Ij,$Ij
1213 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1214 vmovdqu 0x70-0x40($Htbl),$Hkey # $Hkey^6
1215 vpxor $Xmi,$Zmi,$Zmi
1216 vpunpckhqdq $Ij,$Ij,$T1
1217 vpclmulqdq \$0x00,$HK,$T2,$Xmi
1220 vmovdqu 0x10($inp),$Ii # I[1]
1221 vpxor $Zlo,$Xlo,$Xlo
1222 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo
1223 vpxor $Zhi,$Xhi,$Xhi
1224 vpshufb $bswap,$Ii,$Ii
1225 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi
1226 vmovdqu 0x90-0x40($Htbl),$Hkey # $Hkey^7
1227 vpxor $Zmi,$Xmi,$Xmi
1228 vpunpckhqdq $Ii,$Ii,$T2
1229 vpclmulqdq \$0x10,$HK,$T1,$Zmi
1230 vmovdqu 0xb0-0x40($Htbl),$HK
1233 vmovdqu ($inp),$Ij # I[0]
1234 vpxor $Xlo,$Zlo,$Zlo
1235 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1236 vpxor $Xhi,$Zhi,$Zhi
1237 vpshufb $bswap,$Ij,$Ij
1238 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1239 vmovdqu 0xa0-0x40($Htbl),$Hkey # $Hkey^8
1240 vpxor $Xmi,$Zmi,$Zmi
1241 vpclmulqdq \$0x10,$HK,$T2,$Xmi
1247 vpxor $Xi,$Ij,$Ij # accumulate $Xi
1253 vpunpckhqdq $Ij,$Ij,$T1
1254 vmovdqu 0x70($inp),$Ii # I[7]
1255 vpxor $Xlo,$Zlo,$Zlo
1257 vpclmulqdq \$0x00,$Hkey,$Ij,$Xi
1258 vpshufb $bswap,$Ii,$Ii
1259 vpxor $Xhi,$Zhi,$Zhi
1260 vpclmulqdq \$0x11,$Hkey,$Ij,$Xo
1261 vmovdqu 0x00-0x40($Htbl),$Hkey # $Hkey^1
1262 vpunpckhqdq $Ii,$Ii,$T2
1263 vpxor $Xmi,$Zmi,$Zmi
1264 vpclmulqdq \$0x00,$HK,$T1,$Tred
1265 vmovdqu 0x20-0x40($Htbl),$HK
1268 vmovdqu 0x60($inp),$Ij # I[6]
1269 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1270 vpxor $Zlo,$Xi,$Xi # collect result
1271 vpshufb $bswap,$Ij,$Ij
1272 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1274 vmovdqu 0x10-0x40($Htbl),$Hkey # $Hkey^2
1275 vpunpckhqdq $Ij,$Ij,$T1
1276 vpclmulqdq \$0x00,$HK, $T2,$Xmi
1277 vpxor $Zmi,$Tred,$Tred
1280 vmovdqu 0x50($inp),$Ii # I[5]
1281 vpxor $Xi,$Tred,$Tred # aggregated Karatsuba post-processing
1282 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo
1283 vpxor $Xo,$Tred,$Tred
1284 vpslldq \$8,$Tred,$T2
1285 vpxor $Xlo,$Zlo,$Zlo
1286 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi
1287 vpsrldq \$8,$Tred,$Tred
1289 vmovdqu 0x30-0x40($Htbl),$Hkey # $Hkey^3
1290 vpshufb $bswap,$Ii,$Ii
1291 vxorps $Tred,$Xo, $Xo
1292 vpxor $Xhi,$Zhi,$Zhi
1293 vpunpckhqdq $Ii,$Ii,$T2
1294 vpclmulqdq \$0x10,$HK, $T1,$Zmi
1295 vmovdqu 0x50-0x40($Htbl),$HK
1297 vpxor $Xmi,$Zmi,$Zmi
1299 vmovdqu 0x40($inp),$Ij # I[4]
1300 vpalignr \$8,$Xi,$Xi,$Tred # 1st phase
1301 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1302 vpshufb $bswap,$Ij,$Ij
1303 vpxor $Zlo,$Xlo,$Xlo
1304 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1305 vmovdqu 0x40-0x40($Htbl),$Hkey # $Hkey^4
1306 vpunpckhqdq $Ij,$Ij,$T1
1307 vpxor $Zhi,$Xhi,$Xhi
1308 vpclmulqdq \$0x00,$HK, $T2,$Xmi
1310 vpxor $Zmi,$Xmi,$Xmi
1312 vmovdqu 0x30($inp),$Ii # I[3]
1313 vpclmulqdq \$0x10,(%r10),$Xi,$Xi
1314 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo
1315 vpshufb $bswap,$Ii,$Ii
1316 vpxor $Xlo,$Zlo,$Zlo
1317 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi
1318 vmovdqu 0x60-0x40($Htbl),$Hkey # $Hkey^5
1319 vpunpckhqdq $Ii,$Ii,$T2
1320 vpxor $Xhi,$Zhi,$Zhi
1321 vpclmulqdq \$0x10,$HK, $T1,$Zmi
1322 vmovdqu 0x80-0x40($Htbl),$HK
1324 vpxor $Xmi,$Zmi,$Zmi
1326 vmovdqu 0x20($inp),$Ij # I[2]
1327 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1328 vpshufb $bswap,$Ij,$Ij
1329 vpxor $Zlo,$Xlo,$Xlo
1330 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1331 vmovdqu 0x70-0x40($Htbl),$Hkey # $Hkey^6
1332 vpunpckhqdq $Ij,$Ij,$T1
1333 vpxor $Zhi,$Xhi,$Xhi
1334 vpclmulqdq \$0x00,$HK, $T2,$Xmi
1336 vpxor $Zmi,$Xmi,$Xmi
1337 vxorps $Tred,$Xi,$Xi
1339 vmovdqu 0x10($inp),$Ii # I[1]
1340 vpalignr \$8,$Xi,$Xi,$Tred # 2nd phase
1341 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo
1342 vpshufb $bswap,$Ii,$Ii
1343 vpxor $Xlo,$Zlo,$Zlo
1344 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi
1345 vmovdqu 0x90-0x40($Htbl),$Hkey # $Hkey^7
1346 vpclmulqdq \$0x10,(%r10),$Xi,$Xi
1347 vxorps $Xo,$Tred,$Tred
1348 vpunpckhqdq $Ii,$Ii,$T2
1349 vpxor $Xhi,$Zhi,$Zhi
1350 vpclmulqdq \$0x10,$HK, $T1,$Zmi
1351 vmovdqu 0xb0-0x40($Htbl),$HK
1353 vpxor $Xmi,$Zmi,$Zmi
1355 vmovdqu ($inp),$Ij # I[0]
1356 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo
1357 vpshufb $bswap,$Ij,$Ij
1358 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi
1359 vmovdqu 0xa0-0x40($Htbl),$Hkey # $Hkey^8
1361 vpclmulqdq \$0x10,$HK, $T2,$Xmi
1362 vpxor $Xi,$Ij,$Ij # accumulate $Xi
1369 jmp .Ltail_no_xor_avx
1373 vmovdqu -0x10($inp,$len),$Ii # very last word
1374 lea ($inp,$len),$inp
1375 vmovdqu 0x00-0x40($Htbl),$Hkey # $Hkey^1
1376 vmovdqu 0x20-0x40($Htbl),$HK
1377 vpshufb $bswap,$Ii,$Ij
1379 vmovdqa $Xlo,$Zlo # subtle way to zero $Zlo,
1380 vmovdqa $Xhi,$Zhi # $Zhi and
1381 vmovdqa $Xmi,$Zmi # $Zmi
1385 vpunpckhqdq $Ij,$Ij,$T1
1386 vpxor $Xlo,$Zlo,$Zlo
1387 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo
1389 vmovdqu -0x20($inp),$Ii
1390 vpxor $Xhi,$Zhi,$Zhi
1391 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi
1392 vmovdqu 0x10-0x40($Htbl),$Hkey # $Hkey^2
1393 vpshufb $bswap,$Ii,$Ij
1394 vpxor $Xmi,$Zmi,$Zmi
1395 vpclmulqdq \$0x00,$HK,$T1,$Xmi
1400 vpunpckhqdq $Ij,$Ij,$T1
1401 vpxor $Xlo,$Zlo,$Zlo
1402 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo
1404 vmovdqu -0x30($inp),$Ii
1405 vpxor $Xhi,$Zhi,$Zhi
1406 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi
1407 vmovdqu 0x30-0x40($Htbl),$Hkey # $Hkey^3
1408 vpshufb $bswap,$Ii,$Ij
1409 vpxor $Xmi,$Zmi,$Zmi
1410 vpclmulqdq \$0x00,$HK,$T1,$Xmi
1411 vmovdqu 0x50-0x40($Htbl),$HK
1415 vpunpckhqdq $Ij,$Ij,$T1
1416 vpxor $Xlo,$Zlo,$Zlo
1417 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo
1419 vmovdqu -0x40($inp),$Ii
1420 vpxor $Xhi,$Zhi,$Zhi
1421 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi
1422 vmovdqu 0x40-0x40($Htbl),$Hkey # $Hkey^4
1423 vpshufb $bswap,$Ii,$Ij
1424 vpxor $Xmi,$Zmi,$Zmi
1425 vpclmulqdq \$0x00,$HK,$T1,$Xmi
1430 vpunpckhqdq $Ij,$Ij,$T1
1431 vpxor $Xlo,$Zlo,$Zlo
1432 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo
1434 vmovdqu -0x50($inp),$Ii
1435 vpxor $Xhi,$Zhi,$Zhi
1436 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi
1437 vmovdqu 0x60-0x40($Htbl),$Hkey # $Hkey^5
1438 vpshufb $bswap,$Ii,$Ij
1439 vpxor $Xmi,$Zmi,$Zmi
1440 vpclmulqdq \$0x00,$HK,$T1,$Xmi
1441 vmovdqu 0x80-0x40($Htbl),$HK
1445 vpunpckhqdq $Ij,$Ij,$T1
1446 vpxor $Xlo,$Zlo,$Zlo
1447 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo
1449 vmovdqu -0x60($inp),$Ii
1450 vpxor $Xhi,$Zhi,$Zhi
1451 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi
1452 vmovdqu 0x70-0x40($Htbl),$Hkey # $Hkey^6
1453 vpshufb $bswap,$Ii,$Ij
1454 vpxor $Xmi,$Zmi,$Zmi
1455 vpclmulqdq \$0x00,$HK,$T1,$Xmi
1460 vpunpckhqdq $Ij,$Ij,$T1
1461 vpxor $Xlo,$Zlo,$Zlo
1462 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo
1464 vmovdqu -0x70($inp),$Ii
1465 vpxor $Xhi,$Zhi,$Zhi
1466 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi
1467 vmovdqu 0x90-0x40($Htbl),$Hkey # $Hkey^7
1468 vpshufb $bswap,$Ii,$Ij
1469 vpxor $Xmi,$Zmi,$Zmi
1470 vpclmulqdq \$0x00,$HK,$T1,$Xmi
1471 vmovq 0xb8-0x40($Htbl),$HK
1477 vpxor $Xi,$Ij,$Ij # accumulate $Xi
1479 vpunpckhqdq $Ij,$Ij,$T1
1480 vpxor $Xlo,$Zlo,$Zlo
1481 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo
1483 vpxor $Xhi,$Zhi,$Zhi
1484 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi
1485 vpxor $Xmi,$Zmi,$Zmi
1486 vpclmulqdq \$0x00,$HK,$T1,$Xmi
1488 vmovdqu (%r10),$Tred
1492 vpxor $Xmi,$Zmi,$Zmi
1494 vpxor $Xi, $Zmi,$Zmi # aggregated Karatsuba post-processing
1495 vpxor $Xo, $Zmi,$Zmi
1496 vpslldq \$8, $Zmi,$T2
1497 vpsrldq \$8, $Zmi,$Zmi
1501 vpclmulqdq \$0x10,$Tred,$Xi,$T2 # 1st phase
1502 vpalignr \$8,$Xi,$Xi,$Xi
1505 vpclmulqdq \$0x10,$Tred,$Xi,$T2 # 2nd phase
1506 vpalignr \$8,$Xi,$Xi,$Xi
1513 vpshufb $bswap,$Xi,$Xi
1517 $code.=<<___ if ($win64);
1519 movaps 0x10(%rsp),%xmm7
1520 movaps 0x20(%rsp),%xmm8
1521 movaps 0x30(%rsp),%xmm9
1522 movaps 0x40(%rsp),%xmm10
1523 movaps 0x50(%rsp),%xmm11
1524 movaps 0x60(%rsp),%xmm12
1525 movaps 0x70(%rsp),%xmm13
1526 movaps 0x80(%rsp),%xmm14
1527 movaps 0x90(%rsp),%xmm15
1529 .LSEH_end_gcm_ghash_avx:
1533 .size gcm_ghash_avx,.-gcm_ghash_avx
1538 .size gcm_ghash_avx,.-gcm_ghash_avx
1545 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
1547 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
1551 .long 7,0,`0xE1<<1`,0
1553 .type .Lrem_4bit,\@object
1555 .long 0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
1556 .long 0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
1557 .long 0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
1558 .long 0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
1559 .type .Lrem_8bit,\@object
1561 .value 0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
1562 .value 0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
1563 .value 0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
1564 .value 0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
1565 .value 0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
1566 .value 0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
1567 .value 0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
1568 .value 0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
1569 .value 0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
1570 .value 0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
1571 .value 0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
1572 .value 0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
1573 .value 0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
1574 .value 0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
1575 .value 0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
1576 .value 0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
1577 .value 0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
1578 .value 0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
1579 .value 0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
1580 .value 0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
1581 .value 0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
1582 .value 0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
1583 .value 0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
1584 .value 0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
1585 .value 0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
1586 .value 0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
1587 .value 0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
1588 .value 0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
1589 .value 0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
1590 .value 0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
1591 .value 0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
1592 .value 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
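#
# (Entry i of .Lrem_8bit above is the carry-less product of i and
# 0x1C2, 0x1C2 = 0xE1 shifted left by one being the byte pattern of
# the reduction polynomial in this bit order; entry i of .Lrem_4bit is
# the same product shifted left by 4 and placed in the top 16 bits of
# a 64-bit slot.  E.g. entry 3 of .Lrem_8bit is 0x1C2^0x384 = 0x0246,
# and entry 3 of .Lrem_4bit is 0x2460.)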
1594 .asciz "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1598 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1599 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
1607 .extern __imp_RtlVirtualUnwind
1608 .type se_handler,\@abi-omnipotent
1622 mov 120($context),%rax # pull context->Rax
1623 mov 248($context),%rbx # pull context->Rip
1625 mov 8($disp),%rsi # disp->ImageBase
1626 mov 56($disp),%r11 # disp->HandlerData
1628 mov 0(%r11),%r10d # HandlerData[0]
1629 lea (%rsi,%r10),%r10 # prologue label
1630 cmp %r10,%rbx # context->Rip<prologue label
1633 mov 152($context),%rax # pull context->Rsp
1635 mov 4(%r11),%r10d # HandlerData[1]
1636 lea (%rsi,%r10),%r10 # epilogue label
1637 cmp %r10,%rbx # context->Rip>=epilogue label
1640 lea 24(%rax),%rax # adjust "rsp"
1645 mov %rbx,144($context) # restore context->Rbx
1646 mov %rbp,160($context) # restore context->Rbp
1647 mov %r12,216($context) # restore context->R12
1652 mov %rax,152($context) # restore context->Rsp
1653 mov %rsi,168($context) # restore context->Rsi
1654 mov %rdi,176($context) # restore context->Rdi
1656 mov 40($disp),%rdi # disp->ContextRecord
1657 mov $context,%rsi # context
1658 mov \$`1232/8`,%ecx # sizeof(CONTEXT)
1659 .long 0xa548f3fc # cld; rep movsq
1662 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
1663 mov 8(%rsi),%rdx # arg2, disp->ImageBase
1664 mov 0(%rsi),%r8 # arg3, disp->ControlPc
1665 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
1666 mov 40(%rsi),%r10 # disp->ContextRecord
1667 lea 56(%rsi),%r11 # &disp->HandlerData
1668 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1669 mov %r10,32(%rsp) # arg5
1670 mov %r11,40(%rsp) # arg6
1671 mov %r12,48(%rsp) # arg7
1672 mov %rcx,56(%rsp) # arg8, (NULL)
1673 call *__imp_RtlVirtualUnwind(%rip)
1675 mov \$1,%eax # ExceptionContinueSearch
1687 .size se_handler,.-se_handler
1691 .rva .LSEH_begin_gcm_gmult_4bit
1692 .rva .LSEH_end_gcm_gmult_4bit
1693 .rva .LSEH_info_gcm_gmult_4bit
1695 .rva .LSEH_begin_gcm_ghash_4bit
1696 .rva .LSEH_end_gcm_ghash_4bit
1697 .rva .LSEH_info_gcm_ghash_4bit
1699 .rva .LSEH_begin_gcm_init_clmul
1700 .rva .LSEH_end_gcm_init_clmul
1701 .rva .LSEH_info_gcm_init_clmul
1703 .rva .LSEH_begin_gcm_ghash_clmul
1704 .rva .LSEH_end_gcm_ghash_clmul
1705 .rva .LSEH_info_gcm_ghash_clmul
1707 $code.=<<___ if ($avx);
1708 .rva .LSEH_begin_gcm_init_avx
1709 .rva .LSEH_end_gcm_init_avx
1710 .rva .LSEH_info_gcm_init_clmul
1712 .rva .LSEH_begin_gcm_ghash_avx
1713 .rva .LSEH_end_gcm_ghash_avx
1714 .rva .LSEH_info_gcm_ghash_clmul
1719 .LSEH_info_gcm_gmult_4bit:
1722 .rva .Lgmult_prologue,.Lgmult_epilogue # HandlerData
1723 .LSEH_info_gcm_ghash_4bit:
1726 .rva .Lghash_prologue,.Lghash_epilogue # HandlerData
1727 .LSEH_info_gcm_init_clmul:
1728 .byte 0x01,0x08,0x03,0x00
1729 .byte 0x08,0x68,0x00,0x00 #movaps 0x00(rsp),xmm6
1730 .byte 0x04,0x22,0x00,0x00 #sub rsp,0x18
1731 .LSEH_info_gcm_ghash_clmul:
1732 .byte 0x01,0x33,0x16,0x00
1733 .byte 0x33,0xf8,0x09,0x00 #movaps 0x90(rsp),xmm15
1734 .byte 0x2e,0xe8,0x08,0x00 #movaps 0x80(rsp),xmm14
1735 .byte 0x29,0xd8,0x07,0x00 #movaps 0x70(rsp),xmm13
1736 .byte 0x24,0xc8,0x06,0x00 #movaps 0x60(rsp),xmm12
1737 .byte 0x1f,0xb8,0x05,0x00 #movaps 0x50(rsp),xmm11
1738 .byte 0x1a,0xa8,0x04,0x00 #movaps 0x40(rsp),xmm10
1739 .byte 0x15,0x98,0x03,0x00 #movaps 0x30(rsp),xmm9
1740 .byte 0x10,0x88,0x02,0x00 #movaps 0x20(rsp),xmm8
1741 .byte 0x0c,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
1742 .byte 0x08,0x68,0x00,0x00 #movaps 0x00(rsp),xmm6
1743 .byte 0x04,0x01,0x15,0x00 #sub rsp,0xa8
1747 $code =~ s/\`([^\`]*)\`/eval($1)/gem;