crypto/modes/asm/ghash-x86_64.pl
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # March, June 2010
11 #
12 # The module implements the "4-bit" GCM GHASH function and the
13 # underlying single multiplication operation in GF(2^128). "4-bit"
14 # means that it uses a 256-byte per-key table [+128 bytes shared
15 # table]. The GHASH function features a so-called "528B" variant
16 # utilizing an additional 256+16 bytes of per-key storage [+512
17 # bytes shared table]. Performance results are for this streamed
18 # GHASH subroutine, expressed in cycles per processed byte; less is better:
19 #
20 #               gcc 3.4.x(*)    assembler       gain
21 #
22 # P4            28.6            14.0            +100%
23 # Opteron       19.3            7.7             +150%
24 # Core2         17.8            8.1(**)         +120%
25 # Atom          31.6            16.8            +88%
26 # VIA Nano      21.8            10.1            +115%
27 #
28 # (*)   comparison is not completely fair, because C results are
29 #       for the vanilla "256B" implementation, while assembler results
30 #       are for the "528B" one;-)
31 # (**)  it's a mystery [to me] why the Core2 result is not the same
32 #       as Opteron's;
33
34 # May 2010
35 #
36 # Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
37 # See ghash-x86.pl for background information and details about coding
38 # techniques.
39 #
40 # Special thanks to David Woodhouse <dwmw2@infradead.org> for
41 # providing access to a Westmere-based system on behalf of Intel
42 # Open Source Technology Centre.
43
44 # December 2012
45 #
46 # Overhaul: aggregate Karatsuba post-processing, improve ILP in
47 # reduction_alg9, increase reduction aggregate factor to 4x. As for
48 # the latter: ghash-x86.pl argues that increasing the aggregate factor
49 # makes little sense there. Then why increase it here? The critical
50 # path consists of 3 independent pclmulqdq instructions, Karatsuba
51 # post-processing and reduction. "On top" of this we lay down the
52 # aggregated multiplication operations, triplets of independent
53 # pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
54 # little sense to aggregate more multiplications than it takes to
55 # perform the remaining non-multiplication operations. 2x is a
56 # near-optimal coefficient for contemporary Intel CPUs (hence the
57 # modest improvement), but not for Bulldozer, whose logical SIMD
58 # operations are twice as slow as Intel's, so its critical path is
59 # longer. A CPU with a higher pclmulqdq issue rate would also benefit
60 # from a higher aggregate factor...
61 #
62 # Westmere      1.78(+13%)
63 # Sandy Bridge  1.80(+8%)
64 # Ivy Bridge    1.80(+7%)
65 # Haswell       0.55(+93%) (if system doesn't support AVX)
66 # Bulldozer     1.49(+27%)
67
68 # March 2013
69 #
70 # ... 8x aggregate factor AVX code path uses the reduction algorithm
71 # suggested by Shay Gueron[1]. Even though contemporary AVX-capable
72 # CPUs such as Sandy and Ivy Bridge can execute it, the code performs
73 # sub-optimally there in comparison to the above-mentioned version. But
74 # thanks to Ilya Albrekht and Max Locktyukhin of Intel Corp. we knew
75 # that it performs at 0.41 cycles per byte on a Haswell processor.
76 #
77 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
78
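# Purely as an illustration of what the table-driven and PCLMULQDQ code
# paths below compute, here is a minimal bit-by-bit reference for the
# underlying GF(2^128) multiplication (GCM bit order, reduction polynomial
# x^128+x^7+x^2+x+1). This sub is not part of the original module, is never
# called by the generator and assumes a 64-bit Perl; operands and result
# are pairs of 64-bit halves, most significant half first.
sub gf128mul_ref {
	my ($xh,$xl,$yh,$yl)=@_;		# X and Y as (hi,lo) halves
	my ($zh,$zl)=(0,0);			# Z = 0
	my ($vh,$vl)=($yh,$yl);			# V = Y
	for (my $i=0;$i<128;$i++) {
		# bit i of X, counting from the most significant bit
		my $xbit = $i<64 ? ($xh>>(63-$i))&1 : ($xl>>(127-$i))&1;
		($zh,$zl)=($zh^$vh,$zl^$vl) if ($xbit);	# Z ^= V
		my $carry = $vl&1;			# bit shifted out of V
		$vl = ($vl>>1)|(($vh&1)<<63);		# V >>= 1
		$vh >>= 1;
		$vh ^= 0xE100000000000000 if ($carry);	# V ^= 0xE1<<120
	}
	($zh,$zl);
}
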
79 $flavour = shift;
80 $output  = shift;
81 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
82
83 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
84
85 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
86 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
87 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
88 die "can't locate x86_64-xlate.pl";
89
90 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
91                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
92         $avx = ($1>=2.19) + ($1>=2.22);
93 }
94
95 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
96             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
97         $avx = ($1>=2.09) + ($1>=2.10);
98 }
99
100 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
101             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
102         $avx = ($1>=10) + ($1>=11);
103 }
104
105 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
106         $avx = ($2>=3.0) + ($2>3.0);
107 }
108
109 open OUT,"| \"$^X\" $xlate $flavour $output";
110 *STDOUT=*OUT;
111
112 $do4xaggr=1;
113
114 # common register layout
115 $nlo="%rax";
116 $nhi="%rbx";
117 $Zlo="%r8";
118 $Zhi="%r9";
119 $tmp="%r10";
120 $rem_4bit = "%r11";
121
122 $Xi="%rdi";
123 $Htbl="%rsi";
124
125 # per-function register layout
126 $cnt="%rcx";
127 $rem="%rdx";
128
129 sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/     or
130                         $r =~ s/%[er]([sd]i)/%\1l/      or
131                         $r =~ s/%[er](bp)/%\1l/         or
132                         $r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }
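# e.g. &LB("%rax") yields "%al", &LB("%rsi") yields "%sil" and &LB("%r9")
# yields "%r9b", i.e. the assembler name of the low byte of the given
# general-purpose register.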
133
134 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
135 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
136   my $arg = pop;
137     $arg = "\$$arg" if ($arg*1 eq $arg);
138     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
139 }
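# e.g. &mov($Zlo,"8($Xi)") appends the line "mov 8(%rdi),%r8" to $code, and
# &sub($Htbl,-128) appends "sub $-128,%rsi": the destination-first argument
# order of the Perl call is flipped to AT&T src,dst order, and a numeric
# last argument is given a "$" immediate prefix.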
140 \f
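# loop() emits the inner loop of gcm_gmult_4bit below: Xi is folded into Z
# four bits at a time, starting with byte 15 and walking down to byte 0,
# each nibble selecting one of the 16-byte entries of $Htbl, while the four
# bits shifted out of $Zlo on every step are reduced via the .Lrem_4bit
# table.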
141 { my $N;
142   sub loop() {
143   my $inp = shift;
144
145         $N++;
146 $code.=<<___;
147         xor     $nlo,$nlo
148         xor     $nhi,$nhi
149         mov     `&LB("$Zlo")`,`&LB("$nlo")`
150         mov     `&LB("$Zlo")`,`&LB("$nhi")`
151         shl     \$4,`&LB("$nlo")`
152         mov     \$14,$cnt
153         mov     8($Htbl,$nlo),$Zlo
154         mov     ($Htbl,$nlo),$Zhi
155         and     \$0xf0,`&LB("$nhi")`
156         mov     $Zlo,$rem
157         jmp     .Loop$N
158
159 .align  16
160 .Loop$N:
161         shr     \$4,$Zlo
162         and     \$0xf,$rem
163         mov     $Zhi,$tmp
164         mov     ($inp,$cnt),`&LB("$nlo")`
165         shr     \$4,$Zhi
166         xor     8($Htbl,$nhi),$Zlo
167         shl     \$60,$tmp
168         xor     ($Htbl,$nhi),$Zhi
169         mov     `&LB("$nlo")`,`&LB("$nhi")`
170         xor     ($rem_4bit,$rem,8),$Zhi
171         mov     $Zlo,$rem
172         shl     \$4,`&LB("$nlo")`
173         xor     $tmp,$Zlo
174         dec     $cnt
175         js      .Lbreak$N
176
177         shr     \$4,$Zlo
178         and     \$0xf,$rem
179         mov     $Zhi,$tmp
180         shr     \$4,$Zhi
181         xor     8($Htbl,$nlo),$Zlo
182         shl     \$60,$tmp
183         xor     ($Htbl,$nlo),$Zhi
184         and     \$0xf0,`&LB("$nhi")`
185         xor     ($rem_4bit,$rem,8),$Zhi
186         mov     $Zlo,$rem
187         xor     $tmp,$Zlo
188         jmp     .Loop$N
189
190 .align  16
191 .Lbreak$N:
192         shr     \$4,$Zlo
193         and     \$0xf,$rem
194         mov     $Zhi,$tmp
195         shr     \$4,$Zhi
196         xor     8($Htbl,$nlo),$Zlo
197         shl     \$60,$tmp
198         xor     ($Htbl,$nlo),$Zhi
199         and     \$0xf0,`&LB("$nhi")`
200         xor     ($rem_4bit,$rem,8),$Zhi
201         mov     $Zlo,$rem
202         xor     $tmp,$Zlo
203
204         shr     \$4,$Zlo
205         and     \$0xf,$rem
206         mov     $Zhi,$tmp
207         shr     \$4,$Zhi
208         xor     8($Htbl,$nhi),$Zlo
209         shl     \$60,$tmp
210         xor     ($Htbl,$nhi),$Zhi
211         xor     $tmp,$Zlo
212         xor     ($rem_4bit,$rem,8),$Zhi
213
214         bswap   $Zlo
215         bswap   $Zhi
216 ___
217 }}
218
219 $code=<<___;
220 .text
221 .extern OPENSSL_ia32cap_P
222
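# The C-level callers in crypto/modes/gcm128.c use essentially
# gcm_gmult_4bit(u64 Xi[2], u128 Htable[16]) and
# gcm_ghash_4bit(u64 Xi[2], u128 Htable[16], const u8 *inp, size_t len);
# the Xi and Htbl registers below carry those first two arguments.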
223 .globl  gcm_gmult_4bit
224 .type   gcm_gmult_4bit,\@function,2
225 .align  16
226 gcm_gmult_4bit:
227         push    %rbx
228         push    %rbp            # %rbp and %r12 are pushed exclusively in
229         push    %r12            # order to reuse Win64 exception handler...
230 .Lgmult_prologue:
231
232         movzb   15($Xi),$Zlo
233         lea     .Lrem_4bit(%rip),$rem_4bit
234 ___
235         &loop   ($Xi);
236 $code.=<<___;
237         mov     $Zlo,8($Xi)
238         mov     $Zhi,($Xi)
239
240         mov     16(%rsp),%rbx
241         lea     24(%rsp),%rsp
242 .Lgmult_epilogue:
243         ret
244 .size   gcm_gmult_4bit,.-gcm_gmult_4bit
245 ___
246 \f
247 # per-function register layout
248 $inp="%rdx";
249 $len="%rcx";
250 $rem_8bit=$rem_4bit;
251
252 $code.=<<___;
253 .globl  gcm_ghash_4bit
254 .type   gcm_ghash_4bit,\@function,4
255 .align  16
256 gcm_ghash_4bit:
257         push    %rbx
258         push    %rbp
259         push    %r12
260         push    %r13
261         push    %r14
262         push    %r15
263         sub     \$280,%rsp
264 .Lghash_prologue:
265         mov     $inp,%r14               # reassign couple of args
266         mov     $len,%r15
267 ___
268 { my $inp="%r14";
269   my $dat="%edx";
270   my $len="%r15";
271   my @nhi=("%ebx","%ecx");
272   my @rem=("%r12","%r13");
273   my $Hshr4="%rbp";
274
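# The prologue below materializes the "528B" per-key layout mentioned at the
# top of the file: a 16-byte nibble map at (%rsp) plus a 256-byte copy of
# Htbl pre-shifted right by 4 bits at $Hshr4, so that the main loop can
# consume input a byte at a time with .Lrem_8bit corrections.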
275         &sub    ($Htbl,-128);           # size optimization
276         &lea    ($Hshr4,"16+128(%rsp)");
277         { my @lo =($nlo,$nhi);
278           my @hi =($Zlo,$Zhi);
279
280           &xor  ($dat,$dat);
281           for ($i=0,$j=-2;$i<18;$i++,$j++) {
282             &mov        ("$j(%rsp)",&LB($dat))          if ($i>1);
283             &or         ($lo[0],$tmp)                   if ($i>1);
284             &mov        (&LB($dat),&LB($lo[1]))         if ($i>0 && $i<17);
285             &shr        ($lo[1],4)                      if ($i>0 && $i<17);
286             &mov        ($tmp,$hi[1])                   if ($i>0 && $i<17);
287             &shr        ($hi[1],4)                      if ($i>0 && $i<17);
288             &mov        ("8*$j($Hshr4)",$hi[0])         if ($i>1);
289             &mov        ($hi[0],"16*$i+0-128($Htbl)")   if ($i<16);
290             &shl        (&LB($dat),4)                   if ($i>0 && $i<17);
291             &mov        ("8*$j-128($Hshr4)",$lo[0])     if ($i>1);
292             &mov        ($lo[0],"16*$i+8-128($Htbl)")   if ($i<16);
293             &shl        ($tmp,60)                       if ($i>0 && $i<17);
294
295             push        (@lo,shift(@lo));
296             push        (@hi,shift(@hi));
297           }
298         }
299         &add    ($Htbl,-128);
300         &mov    ($Zlo,"8($Xi)");
301         &mov    ($Zhi,"0($Xi)");
302         &add    ($len,$inp);            # pointer to the end of data
303         &lea    ($rem_8bit,".Lrem_8bit(%rip)");
304         &jmp    (".Louter_loop");
305
306 $code.=".align  16\n.Louter_loop:\n";
307         &xor    ($Zhi,"($inp)");
308         &mov    ("%rdx","8($inp)");
309         &lea    ($inp,"16($inp)");
310         &xor    ("%rdx",$Zlo);
311         &mov    ("($Xi)",$Zhi);
312         &mov    ("8($Xi)","%rdx");
313         &shr    ("%rdx",32);
314
315         &xor    ($nlo,$nlo);
316         &rol    ($dat,8);
317         &mov    (&LB($nlo),&LB($dat));
318         &movz   ($nhi[0],&LB($dat));
319         &shl    (&LB($nlo),4);
320         &shr    ($nhi[0],4);
321
322         for ($j=11,$i=0;$i<15;$i++) {
323             &rol        ($dat,8);
324             &xor        ($Zlo,"8($Htbl,$nlo)")                  if ($i>0);
325             &xor        ($Zhi,"($Htbl,$nlo)")                   if ($i>0);
326             &mov        ($Zlo,"8($Htbl,$nlo)")                  if ($i==0);
327             &mov        ($Zhi,"($Htbl,$nlo)")                   if ($i==0);
328
329             &mov        (&LB($nlo),&LB($dat));
330             &xor        ($Zlo,$tmp)                             if ($i>0);
331             &movzw      ($rem[1],"($rem_8bit,$rem[1],2)")       if ($i>0);
332
333             &movz       ($nhi[1],&LB($dat));
334             &shl        (&LB($nlo),4);
335             &movzb      ($rem[0],"(%rsp,$nhi[0])");
336
337             &shr        ($nhi[1],4)                             if ($i<14);
338             &and        ($nhi[1],0xf0)                          if ($i==14);
339             &shl        ($rem[1],48)                            if ($i>0);
340             &xor        ($rem[0],$Zlo);
341
342             &mov        ($tmp,$Zhi);
343             &xor        ($Zhi,$rem[1])                          if ($i>0);
344             &shr        ($Zlo,8);
345
346             &movz       ($rem[0],&LB($rem[0]));
347             &mov        ($dat,"$j($Xi)")                        if (--$j%4==0);
348             &shr        ($Zhi,8);
349
350             &xor        ($Zlo,"-128($Hshr4,$nhi[0],8)");
351             &shl        ($tmp,56);
352             &xor        ($Zhi,"($Hshr4,$nhi[0],8)");
353
354             unshift     (@nhi,pop(@nhi));               # "rotate" registers
355             unshift     (@rem,pop(@rem));
356         }
357         &movzw  ($rem[1],"($rem_8bit,$rem[1],2)");
358         &xor    ($Zlo,"8($Htbl,$nlo)");
359         &xor    ($Zhi,"($Htbl,$nlo)");
360
361         &shl    ($rem[1],48);
362         &xor    ($Zlo,$tmp);
363
364         &xor    ($Zhi,$rem[1]);
365         &movz   ($rem[0],&LB($Zlo));
366         &shr    ($Zlo,4);
367
368         &mov    ($tmp,$Zhi);
369         &shl    (&LB($rem[0]),4);
370         &shr    ($Zhi,4);
371
372         &xor    ($Zlo,"8($Htbl,$nhi[0])");
373         &movzw  ($rem[0],"($rem_8bit,$rem[0],2)");
374         &shl    ($tmp,60);
375
376         &xor    ($Zhi,"($Htbl,$nhi[0])");
377         &xor    ($Zlo,$tmp);
378         &shl    ($rem[0],48);
379
380         &bswap  ($Zlo);
381         &xor    ($Zhi,$rem[0]);
382
383         &bswap  ($Zhi);
384         &cmp    ($inp,$len);
385         &jb     (".Louter_loop");
386 }
387 $code.=<<___;
388         mov     $Zlo,8($Xi)
389         mov     $Zhi,($Xi)
390
391         lea     280(%rsp),%rsi
392         mov     0(%rsi),%r15
393         mov     8(%rsi),%r14
394         mov     16(%rsi),%r13
395         mov     24(%rsi),%r12
396         mov     32(%rsi),%rbp
397         mov     40(%rsi),%rbx
398         lea     48(%rsi),%rsp
399 .Lghash_epilogue:
400         ret
401 .size   gcm_ghash_4bit,.-gcm_ghash_4bit
402 ___
403 \f
404 ######################################################################
405 # PCLMULQDQ version.
406
407 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
408                 ("%rdi","%rsi","%rdx","%rcx");  # Unix order
409
410 ($Xi,$Xhi)=("%xmm0","%xmm1");   $Hkey="%xmm2";
411 ($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
412
413 sub clmul64x64_T2 {     # minimal register pressure
414 my ($Xhi,$Xi,$Hkey,$HK)=@_;
415
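# Karatsuba carry-less multiplication of the 128-bit $Xi by $Hkey using just
# three pclmulqdq's: lo = Xi.lo*H.lo, hi = Xi.hi*H.hi and
# mid = (Xi.lo^Xi.hi)*(H.lo^H.hi)^lo^hi; on return $Xi holds the low and
# $Xhi the high 128 bits of the double-width product. If $HK is provided,
# its low half is expected to hold H.lo^H.hi already.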
416 if (!defined($HK)) {    $HK = $T2;
417 $code.=<<___;
418         movdqa          $Xi,$Xhi                #
419         pshufd          \$0b01001110,$Xi,$T1
420         pshufd          \$0b01001110,$Hkey,$T2
421         pxor            $Xi,$T1                 #
422         pxor            $Hkey,$T2
423 ___
424 } else {
425 $code.=<<___;
426         movdqa          $Xi,$Xhi                #
427         pshufd          \$0b01001110,$Xi,$T1
428         pxor            $Xi,$T1                 #
429 ___
430 }
431 $code.=<<___;
432         pclmulqdq       \$0x00,$Hkey,$Xi        #######
433         pclmulqdq       \$0x11,$Hkey,$Xhi       #######
434         pclmulqdq       \$0x00,$HK,$T1          #######
435         pxor            $Xi,$T1                 #
436         pxor            $Xhi,$T1                #
437
438         movdqa          $T1,$T2                 #
439         psrldq          \$8,$T1
440         pslldq          \$8,$T2                 #
441         pxor            $T1,$Xhi
442         pxor            $T2,$Xi                 #
443 ___
444 }
445
446 sub reduction_alg9 {    # 17/11 times faster than Intel version
447 my ($Xhi,$Xi) = @_;
448
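# Folds the double-width product in $Xhi:$Xi back to 128 bits modulo the GCM
# polynomial x^128+x^7+x^2+x+1. In the bit-reflected representation used
# here the first phase amounts to xoring $Xi with copies of itself shifted
# left by 57, 62 and 63 bits (built up from the psllq 5/1/57 sequence), and
# the second phase to xoring in right shifts by 1, 2 and 7 bits before the
# final xor with $Xhi.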
449 $code.=<<___;
450         # 1st phase
451         movdqa          $Xi,$T2                 #
452         movdqa          $Xi,$T1
453         psllq           \$5,$Xi
454         pxor            $Xi,$T1                 #
455         psllq           \$1,$Xi
456         pxor            $T1,$Xi                 #
457         psllq           \$57,$Xi                #
458         movdqa          $Xi,$T1                 #
459         pslldq          \$8,$Xi
460         psrldq          \$8,$T1                 #       
461         pxor            $T2,$Xi
462         pxor            $T1,$Xhi                #
463
464         # 2nd phase
465         movdqa          $Xi,$T2
466         psrlq           \$1,$Xi
467         pxor            $T2,$Xhi                #
468         pxor            $Xi,$T2
469         psrlq           \$5,$Xi
470         pxor            $T2,$Xi                 #
471         psrlq           \$1,$Xi                 #
472         pxor            $Xhi,$Xi                #
473 ___
474 }
475 \f
476 { my ($Htbl,$Xip)=@_4args;
477   my $HK="%xmm6";
478
479 $code.=<<___;
480 .globl  gcm_init_clmul
481 .type   gcm_init_clmul,\@abi-omnipotent
482 .align  16
483 gcm_init_clmul:
484 .L_init_clmul:
485 ___
486 $code.=<<___ if ($win64);
487 .LSEH_begin_gcm_init_clmul:
488         # I can't trust assembler to use specific encoding:-(
489         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
490         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
491 ___
492 $code.=<<___;
493         movdqu          ($Xip),$Hkey
494         pshufd          \$0b01001110,$Hkey,$Hkey        # dword swap
495
496         # <<1 twist
497         pshufd          \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
498         movdqa          $Hkey,$T1
499         psllq           \$1,$Hkey
500         pxor            $T3,$T3                 #
501         psrlq           \$63,$T1
502         pcmpgtd         $T2,$T3                 # broadcast carry bit
503         pslldq          \$8,$T1
504         por             $T1,$Hkey               # H<<=1
505
506         # magic reduction
507         pand            .L0x1c2_polynomial(%rip),$T3
508         pxor            $T3,$Hkey               # if(carry) H^=0x1c2_polynomial
509
510         # calculate H^2
511         pshufd          \$0b01001110,$Hkey,$HK
512         movdqa          $Hkey,$Xi
513         pxor            $Hkey,$HK
514 ___
515         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);
516         &reduction_alg9 ($Xhi,$Xi);
517 $code.=<<___;
518         pshufd          \$0b01001110,$Hkey,$T1
519         pshufd          \$0b01001110,$Xi,$T2
520         pxor            $Hkey,$T1               # Karatsuba pre-processing
521         movdqu          $Hkey,0x00($Htbl)       # save H
522         pxor            $Xi,$T2                 # Karatsuba pre-processing
523         movdqu          $Xi,0x10($Htbl)         # save H^2
524         palignr         \$8,$T1,$T2             # low part is H.lo^H.hi...
525         movdqu          $T2,0x20($Htbl)         # save Karatsuba "salt"
526 ___
527 if ($do4xaggr) {
528         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^3
529         &reduction_alg9 ($Xhi,$Xi);
530 $code.=<<___;
531         movdqa          $Xi,$T3
532 ___
533         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^4
534         &reduction_alg9 ($Xhi,$Xi);
535 $code.=<<___;
536         pshufd          \$0b01001110,$T3,$T1
537         pshufd          \$0b01001110,$Xi,$T2
538         pxor            $T3,$T1                 # Karatsuba pre-processing
539         movdqu          $T3,0x30($Htbl)         # save H^3
540         pxor            $Xi,$T2                 # Karatsuba pre-processing
541         movdqu          $Xi,0x40($Htbl)         # save H^4
542         palignr         \$8,$T1,$T2             # low part is H^3.lo^H^3.hi...
543         movdqu          $T2,0x50($Htbl)         # save Karatsuba "salt"
544 ___
545 }
546 $code.=<<___ if ($win64);
547         movaps  (%rsp),%xmm6
548         lea     0x18(%rsp),%rsp
549 .LSEH_end_gcm_init_clmul:
550 ___
551 $code.=<<___;
552         ret
553 .size   gcm_init_clmul,.-gcm_init_clmul
554 ___
555 }
556
557 { my ($Xip,$Htbl)=@_4args;
558
559 $code.=<<___;
560 .globl  gcm_gmult_clmul
561 .type   gcm_gmult_clmul,\@abi-omnipotent
562 .align  16
563 gcm_gmult_clmul:
564 .L_gmult_clmul:
565         movdqu          ($Xip),$Xi
566         movdqa          .Lbswap_mask(%rip),$T3
567         movdqu          ($Htbl),$Hkey
568         movdqu          0x20($Htbl),$T2
569         pshufb          $T3,$Xi
570 ___
571         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$T2);
572 $code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
573         # experimental alternative. special thing about it is that there
574         # is no dependency between the two multiplications...
575         mov             \$`0xE1<<1`,%eax
576         mov             \$0xA040608020C0E000,%r10       # ((7..0)·0xE0)&0xff
577         mov             \$0x07,%r11d
578         movq            %rax,$T1
579         movq            %r10,$T2
580         movq            %r11,$T3                # borrow $T3
581         pand            $Xi,$T3
582         pshufb          $T3,$T2                 # ($Xi&7)·0xE0
583         movq            %rax,$T3
584         pclmulqdq       \$0x00,$Xi,$T1          # ·(0xE1<<1)
585         pxor            $Xi,$T2
586         pslldq          \$15,$T2
587         paddd           $T2,$T2                 # <<(64+56+1)
588         pxor            $T2,$Xi
589         pclmulqdq       \$0x01,$T3,$Xi
590         movdqa          .Lbswap_mask(%rip),$T3  # reload $T3
591         psrldq          \$1,$T1
592         pxor            $T1,$Xhi
593         pslldq          \$7,$Xi
594         pxor            $Xhi,$Xi
595 ___
596 $code.=<<___;
597         pshufb          $T3,$Xi
598         movdqu          $Xi,($Xip)
599         ret
600 .size   gcm_gmult_clmul,.-gcm_gmult_clmul
601 ___
602 }
603 \f
604 { my ($Xip,$Htbl,$inp,$len)=@_4args;
605   my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
606   my ($T1,$T2,$T3)=map("%xmm$_",(8..10));
607
608 $code.=<<___;
609 .globl  gcm_ghash_clmul
610 .type   gcm_ghash_clmul,\@abi-omnipotent
611 .align  32
612 gcm_ghash_clmul:
613 .L_ghash_clmul:
614 ___
615 $code.=<<___ if ($win64);
616         lea     -0x88(%rsp),%rax
617 .LSEH_begin_gcm_ghash_clmul:
618         # I can't trust assembler to use specific encoding:-(
619         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
620         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
621         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
622         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
623         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
624         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
625         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
626         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
627         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
628         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
629         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
630 ___
631 $code.=<<___;
632         movdqa          .Lbswap_mask(%rip),$T3
633
634         movdqu          ($Xip),$Xi
635         movdqu          ($Htbl),$Hkey
636         movdqu          0x20($Htbl),$HK
637         pshufb          $T3,$Xi
638
639         sub             \$0x10,$len
640         jz              .Lodd_tail
641
642         movdqu          0x10($Htbl),$Hkey2
643 ___
644 if ($do4xaggr) {
645 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
646
647 $code.=<<___;
648         mov             OPENSSL_ia32cap_P+4(%rip),%eax
649         cmp             \$0x30,$len
650         jb              .Lskip4x
651
652         and             \$`1<<26|1<<22`,%eax    # isolate MOVBE+XSAVE
653         cmp             \$`1<<22`,%eax          # check for MOVBE without XSAVE
654         je              .Lskip4x
655
656         sub             \$0x30,$len
657         mov             \$0xA040608020C0E000,%rax       # ((7..0)·0xE0)&0xff
658         movdqu          0x30($Htbl),$Hkey3
659         movdqu          0x40($Htbl),$Hkey4
660
661         #######
662         # Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
663         #
664         movdqu          0x30($inp),$Xln
665          movdqu         0x20($inp),$Xl
666         pshufb          $T3,$Xln
667          pshufb         $T3,$Xl
668         movdqa          $Xln,$Xhn
669         pshufd          \$0b01001110,$Xln,$Xmn
670         pxor            $Xln,$Xmn
671         pclmulqdq       \$0x00,$Hkey,$Xln
672         pclmulqdq       \$0x11,$Hkey,$Xhn
673         pclmulqdq       \$0x00,$HK,$Xmn
674
675         movdqa          $Xl,$Xh
676         pshufd          \$0b01001110,$Xl,$Xm
677         pxor            $Xl,$Xm
678         pclmulqdq       \$0x00,$Hkey2,$Xl
679         pclmulqdq       \$0x11,$Hkey2,$Xh
680         pclmulqdq       \$0x10,$HK,$Xm
681         xorps           $Xl,$Xln
682         xorps           $Xh,$Xhn
683         movups          0x50($Htbl),$HK
684         xorps           $Xm,$Xmn
685
686         movdqu          0x10($inp),$Xl
687          movdqu         0($inp),$T1
688         pshufb          $T3,$Xl
689          pshufb         $T3,$T1
690         movdqa          $Xl,$Xh
691         pshufd          \$0b01001110,$Xl,$Xm
692          pxor           $T1,$Xi
693         pxor            $Xl,$Xm
694         pclmulqdq       \$0x00,$Hkey3,$Xl
695          movdqa         $Xi,$Xhi
696          pshufd         \$0b01001110,$Xi,$T1
697          pxor           $Xi,$T1
698         pclmulqdq       \$0x11,$Hkey3,$Xh
699         pclmulqdq       \$0x00,$HK,$Xm
700         xorps           $Xl,$Xln
701         xorps           $Xh,$Xhn
702
703         lea     0x40($inp),$inp
704         sub     \$0x40,$len
705         jc      .Ltail4x
706
707         jmp     .Lmod4_loop
708 .align  32
709 .Lmod4_loop:
710         pclmulqdq       \$0x00,$Hkey4,$Xi
711         xorps           $Xm,$Xmn
712          movdqu         0x30($inp),$Xl
713          pshufb         $T3,$Xl
714         pclmulqdq       \$0x11,$Hkey4,$Xhi
715         xorps           $Xln,$Xi
716          movdqu         0x20($inp),$Xln
717          movdqa         $Xl,$Xh
718         pclmulqdq       \$0x10,$HK,$T1
719          pshufd         \$0b01001110,$Xl,$Xm
720         xorps           $Xhn,$Xhi
721          pxor           $Xl,$Xm
722          pshufb         $T3,$Xln
723         movups          0x20($Htbl),$HK
724         xorps           $Xmn,$T1
725          pclmulqdq      \$0x00,$Hkey,$Xl
726          pshufd         \$0b01001110,$Xln,$Xmn
727
728         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
729          movdqa         $Xln,$Xhn
730         pxor            $Xhi,$T1                #
731          pxor           $Xln,$Xmn
732         movdqa          $T1,$T2                 #
733          pclmulqdq      \$0x11,$Hkey,$Xh
734         pslldq          \$8,$T1
735         psrldq          \$8,$T2                 #
736         pxor            $T1,$Xi
737         movdqa          .L7_mask(%rip),$T1
738         pxor            $T2,$Xhi                #
739         movq            %rax,$T2
740
741         pand            $Xi,$T1                 # 1st phase
742         pshufb          $T1,$T2                 #
743         pxor            $Xi,$T2                 #
744          pclmulqdq      \$0x00,$HK,$Xm
745         psllq           \$57,$T2                #
746         movdqa          $T2,$T1                 #
747         pslldq          \$8,$T2
748          pclmulqdq      \$0x00,$Hkey2,$Xln
749         psrldq          \$8,$T1                 #       
750         pxor            $T2,$Xi
751         pxor            $T1,$Xhi                #
752         movdqu          0($inp),$T1
753
754         movdqa          $Xi,$T2                 # 2nd phase
755         psrlq           \$1,$Xi
756          pclmulqdq      \$0x11,$Hkey2,$Xhn
757          xorps          $Xl,$Xln
758          movdqu         0x10($inp),$Xl
759          pshufb         $T3,$Xl
760          pclmulqdq      \$0x10,$HK,$Xmn
761          xorps          $Xh,$Xhn
762          movups         0x50($Htbl),$HK
763         pshufb          $T3,$T1
764         pxor            $T2,$Xhi                #
765         pxor            $Xi,$T2
766         psrlq           \$5,$Xi
767
768          movdqa         $Xl,$Xh
769          pxor           $Xm,$Xmn
770          pshufd         \$0b01001110,$Xl,$Xm
771         pxor            $T2,$Xi                 #
772         pxor            $T1,$Xhi
773          pxor           $Xl,$Xm
774          pclmulqdq      \$0x00,$Hkey3,$Xl
775         psrlq           \$1,$Xi                 #
776         pxor            $Xhi,$Xi                #
777         movdqa          $Xi,$Xhi
778          pclmulqdq      \$0x11,$Hkey3,$Xh
779          xorps          $Xl,$Xln
780         pshufd          \$0b01001110,$Xi,$T1
781         pxor            $Xi,$T1
782
783          pclmulqdq      \$0x00,$HK,$Xm
784          xorps          $Xh,$Xhn
785
786         lea     0x40($inp),$inp
787         sub     \$0x40,$len
788         jnc     .Lmod4_loop
789
790 .Ltail4x:
791         pclmulqdq       \$0x00,$Hkey4,$Xi
792         pclmulqdq       \$0x11,$Hkey4,$Xhi
793         pclmulqdq       \$0x10,$HK,$T1
794         xorps           $Xm,$Xmn
795         xorps           $Xln,$Xi
796         xorps           $Xhn,$Xhi
797         pxor            $Xi,$Xhi                # aggregated Karatsuba post-processing
798         pxor            $Xmn,$T1
799
800         pxor            $Xhi,$T1                #
801         pxor            $Xi,$Xhi
802
803         movdqa          $T1,$T2                 #
804         psrldq          \$8,$T1
805         pslldq          \$8,$T2                 #
806         pxor            $T1,$Xhi
807         pxor            $T2,$Xi                 #
808 ___
809         &reduction_alg9($Xhi,$Xi);
810 $code.=<<___;
811         add     \$0x40,$len
812         jz      .Ldone
813         movdqu  0x20($Htbl),$HK
814         sub     \$0x10,$len
815         jz      .Lodd_tail
816 .Lskip4x:
817 ___
818 }
819 $code.=<<___;
820         #######
821         # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
822         #       [(H*Ii+1) + (H*Xi+1)] mod P =
823         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
824         #
825         movdqu          ($inp),$T1              # Ii
826         movdqu          16($inp),$Xln           # Ii+1
827         pshufb          $T3,$T1
828         pshufb          $T3,$Xln
829         pxor            $T1,$Xi                 # Ii+Xi
830
831         movdqa          $Xln,$Xhn
832         pshufd          \$0b01001110,$Xln,$Xmn
833         pxor            $Xln,$Xmn
834         pclmulqdq       \$0x00,$Hkey,$Xln
835         pclmulqdq       \$0x11,$Hkey,$Xhn
836         pclmulqdq       \$0x00,$HK,$Xmn
837
838         lea             32($inp),$inp           # i+=2
839         nop
840         sub             \$0x20,$len
841         jbe             .Leven_tail
842         nop
843         jmp             .Lmod_loop
844
845 .align  32
846 .Lmod_loop:
847         movdqa          $Xi,$Xhi
848         movdqa          $Xmn,$T1
849         pshufd          \$0b01001110,$Xi,$Xmn   #
850         pxor            $Xi,$Xmn                #
851
852         pclmulqdq       \$0x00,$Hkey2,$Xi
853         pclmulqdq       \$0x11,$Hkey2,$Xhi
854         pclmulqdq       \$0x10,$HK,$Xmn
855
856         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
857         pxor            $Xhn,$Xhi
858           movdqu        ($inp),$T2              # Ii
859         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
860           pshufb        $T3,$T2
861           movdqu        16($inp),$Xln           # Ii+1
862
863         pxor            $Xhi,$T1
864           pxor          $T2,$Xhi                # "Ii+Xi", consume early
865         pxor            $T1,$Xmn
866          pshufb         $T3,$Xln
867         movdqa          $Xmn,$T1                #
868         psrldq          \$8,$T1
869         pslldq          \$8,$Xmn                #
870         pxor            $T1,$Xhi
871         pxor            $Xmn,$Xi                #
872
873         movdqa          $Xln,$Xhn               #
874
875           movdqa        $Xi,$T2                 # 1st phase
876           movdqa        $Xi,$T1
877           psllq         \$5,$Xi
878           pxor          $Xi,$T1                 #
879         pclmulqdq       \$0x00,$Hkey,$Xln       #######
880           psllq         \$1,$Xi
881           pxor          $T1,$Xi                 #
882           psllq         \$57,$Xi                #
883           movdqa        $Xi,$T1                 #
884           pslldq        \$8,$Xi
885           psrldq        \$8,$T1                 #       
886           pxor          $T2,$Xi
887         pshufd          \$0b01001110,$Xhn,$Xmn
888           pxor          $T1,$Xhi                #
889         pxor            $Xhn,$Xmn               #
890
891           movdqa        $Xi,$T2                 # 2nd phase
892           psrlq         \$1,$Xi
893         pclmulqdq       \$0x11,$Hkey,$Xhn       #######
894           pxor          $T2,$Xhi                #
895           pxor          $Xi,$T2
896           psrlq         \$5,$Xi
897           pxor          $T2,$Xi                 #
898         lea             32($inp),$inp
899           psrlq         \$1,$Xi                 #
900         pclmulqdq       \$0x00,$HK,$Xmn         #######
901           pxor          $Xhi,$Xi                #
902
903         sub             \$0x20,$len
904         ja              .Lmod_loop
905
906 .Leven_tail:
907          movdqa         $Xi,$Xhi
908          movdqa         $Xmn,$T1
909          pshufd         \$0b01001110,$Xi,$Xmn   #
910          pxor           $Xi,$Xmn                #
911
912         pclmulqdq       \$0x00,$Hkey2,$Xi
913         pclmulqdq       \$0x11,$Hkey2,$Xhi
914         pclmulqdq       \$0x10,$HK,$Xmn
915
916         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
917         pxor            $Xhn,$Xhi
918         pxor            $Xi,$T1
919         pxor            $Xhi,$T1
920         pxor            $T1,$Xmn
921         movdqa          $Xmn,$T1                #
922         psrldq          \$8,$T1
923         pslldq          \$8,$Xmn                #
924         pxor            $T1,$Xhi
925         pxor            $Xmn,$Xi                #
926 ___
927         &reduction_alg9 ($Xhi,$Xi);
928 $code.=<<___;
929         test            $len,$len
930         jnz             .Ldone
931
932 .Lodd_tail:
933         movdqu          ($inp),$T1              # Ii
934         pshufb          $T3,$T1
935         pxor            $T1,$Xi                 # Ii+Xi
936 ___
937         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H*(Ii+Xi)
938         &reduction_alg9 ($Xhi,$Xi);
939 $code.=<<___;
940 .Ldone:
941         pshufb          $T3,$Xi
942         movdqu          $Xi,($Xip)
943 ___
944 $code.=<<___ if ($win64);
945         movaps  (%rsp),%xmm6
946         movaps  0x10(%rsp),%xmm7
947         movaps  0x20(%rsp),%xmm8
948         movaps  0x30(%rsp),%xmm9
949         movaps  0x40(%rsp),%xmm10
950         movaps  0x50(%rsp),%xmm11
951         movaps  0x60(%rsp),%xmm12
952         movaps  0x70(%rsp),%xmm13
953         movaps  0x80(%rsp),%xmm14
954         movaps  0x90(%rsp),%xmm15
955         lea     0xa8(%rsp),%rsp
956 .LSEH_end_gcm_ghash_clmul:
957 ___
958 $code.=<<___;
959         ret
960 .size   gcm_ghash_clmul,.-gcm_ghash_clmul
961 ___
962 }
963 \f
964 $code.=<<___;
965 .globl  gcm_init_avx
966 .type   gcm_init_avx,\@abi-omnipotent
967 .align  32
968 gcm_init_avx:
969 ___
970 if ($avx) {
971 my ($Htbl,$Xip)=@_4args;
972 my $HK="%xmm6";
973
974 $code.=<<___ if ($win64);
975 .LSEH_begin_gcm_init_avx:
976         # I can't trust assembler to use specific encoding:-(
977         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
978         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
979 ___
980 $code.=<<___;
981         vzeroupper
982
983         vmovdqu         ($Xip),$Hkey
984         vpshufd         \$0b01001110,$Hkey,$Hkey        # dword swap
985
986         # <<1 twist
987         vpshufd         \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
988         vpsrlq          \$63,$Hkey,$T1
989         vpsllq          \$1,$Hkey,$Hkey
990         vpxor           $T3,$T3,$T3             #
991         vpcmpgtd        $T2,$T3,$T3             # broadcast carry bit
992         vpslldq         \$8,$T1,$T1
993         vpor            $T1,$Hkey,$Hkey         # H<<=1
994
995         # magic reduction
996         vpand           .L0x1c2_polynomial(%rip),$T3,$T3
997         vpxor           $T3,$Hkey,$Hkey         # if(carry) H^=0x1c2_polynomial
998
999         vpunpckhqdq     $Hkey,$Hkey,$HK
1000         vmovdqa         $Hkey,$Xi
1001         vpxor           $Hkey,$HK,$HK
1002         mov             \$4,%r10                # up to H^8
1003         jmp             .Linit_start_avx
1004 ___
1005
1006 sub clmul64x64_avx {
1007 my ($Xhi,$Xi,$Hkey,$HK)=@_;
1008
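# Same Karatsuba multiplication as clmul64x64_T2 above, recast with
# three-operand AVX instructions.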
1009 if (!defined($HK)) {    $HK = $T2;
1010 $code.=<<___;
1011         vpunpckhqdq     $Xi,$Xi,$T1
1012         vpunpckhqdq     $Hkey,$Hkey,$T2
1013         vpxor           $Xi,$T1,$T1             #
1014         vpxor           $Hkey,$T2,$T2
1015 ___
1016 } else {
1017 $code.=<<___;
1018         vpunpckhqdq     $Xi,$Xi,$T1
1019         vpxor           $Xi,$T1,$T1             #
1020 ___
1021 }
1022 $code.=<<___;
1023         vpclmulqdq      \$0x11,$Hkey,$Xi,$Xhi   #######
1024         vpclmulqdq      \$0x00,$Hkey,$Xi,$Xi    #######
1025         vpclmulqdq      \$0x00,$HK,$T1,$T1      #######
1026         vpxor           $Xi,$Xhi,$T2            #
1027         vpxor           $T2,$T1,$T1             #
1028
1029         vpslldq         \$8,$T1,$T2             #
1030         vpsrldq         \$8,$T1,$T1
1031         vpxor           $T2,$Xi,$Xi             #
1032         vpxor           $T1,$Xhi,$Xhi
1033 ___
1034 }
1035
1036 sub reduction_avx {
1037 my ($Xhi,$Xi) = @_;
1038
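# Same two-phase reduction modulo x^128+x^7+x^2+x+1 as reduction_alg9 above,
# with the left shifts by 57, 62 and 63 spelled out explicitly and the right
# shifts by 1, 2 and 7 built from the vpsrlq 1/5/1 sequence.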
1039 $code.=<<___;
1040         vpsllq          \$57,$Xi,$T1            # 1st phase
1041         vpsllq          \$62,$Xi,$T2
1042         vpxor           $T1,$T2,$T2             #
1043         vpsllq          \$63,$Xi,$T1
1044         vpxor           $T1,$T2,$T2             #
1045         vpslldq         \$8,$T2,$T1             #
1046         vpsrldq         \$8,$T2,$T2
1047         vpxor           $T1,$Xi,$Xi             #
1048         vpxor           $T2,$Xhi,$Xhi
1049
1050         vpsrlq          \$1,$Xi,$T2             # 2nd phase
1051         vpxor           $Xi,$Xhi,$Xhi
1052         vpxor           $T2,$Xi,$Xi             #
1053         vpsrlq          \$5,$T2,$T2
1054         vpxor           $T2,$Xi,$Xi             #
1055         vpsrlq          \$1,$Xi,$Xi             #
1056         vpxor           $Xhi,$Xi,$Xi            #
1057 ___
1058 }
1059
1060 $code.=<<___;
1061 .align  32
1062 .Linit_loop_avx:
1063         vpalignr        \$8,$T1,$T2,$T3         # low part is H.lo^H.hi...
1064         vmovdqu         $T3,-0x10($Htbl)        # save Karatsuba "salt"
1065 ___
1066         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^3,5,7
1067         &reduction_avx  ($Xhi,$Xi);
1068 $code.=<<___;
1069 .Linit_start_avx:
1070         vmovdqa         $Xi,$T3
1071 ___
1072         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^2,4,6,8
1073         &reduction_avx  ($Xhi,$Xi);
1074 $code.=<<___;
1075         vpshufd         \$0b01001110,$T3,$T1
1076         vpshufd         \$0b01001110,$Xi,$T2
1077         vpxor           $T3,$T1,$T1             # Karatsuba pre-processing
1078         vmovdqu         $T3,0x00($Htbl)         # save H^1,3,5,7
1079         vpxor           $Xi,$T2,$T2             # Karatsuba pre-processing
1080         vmovdqu         $Xi,0x10($Htbl)         # save H^2,4,6,8
1081         lea             0x30($Htbl),$Htbl
1082         sub             \$1,%r10
1083         jnz             .Linit_loop_avx
1084
1085         vpalignr        \$8,$T2,$T1,$T3         # last "salt" is flipped
1086         vmovdqu         $T3,-0x10($Htbl)
1087
1088         vzeroupper
1089 ___
1090 $code.=<<___ if ($win64);
1091         movaps  (%rsp),%xmm6
1092         lea     0x18(%rsp),%rsp
1093 .LSEH_end_gcm_init_avx:
1094 ___
1095 $code.=<<___;
1096         ret
1097 .size   gcm_init_avx,.-gcm_init_avx
1098 ___
1099 } else {
1100 $code.=<<___;
1101         jmp     .L_init_clmul
1102 .size   gcm_init_avx,.-gcm_init_avx
1103 ___
1104 }
1105
1106 $code.=<<___;
1107 .globl  gcm_gmult_avx
1108 .type   gcm_gmult_avx,\@abi-omnipotent
1109 .align  32
1110 gcm_gmult_avx:
1111         jmp     .L_gmult_clmul
1112 .size   gcm_gmult_avx,.-gcm_gmult_avx
1113 ___
1114 \f
1115 $code.=<<___;
1116 .globl  gcm_ghash_avx
1117 .type   gcm_ghash_avx,\@abi-omnipotent
1118 .align  32
1119 gcm_ghash_avx:
1120 ___
1121 if ($avx) {
1122 my ($Xip,$Htbl,$inp,$len)=@_4args;
1123 my ($Xlo,$Xhi,$Xmi,
1124     $Zlo,$Zhi,$Zmi,
1125     $Hkey,$HK,$T1,$T2,
1126     $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));
1127
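# The table written by gcm_init_avx consists of 0x30-byte groups, each
# holding H^(2k-1), H^(2k) and their xor-combined halves (the Karatsuba
# constant), for H^1 through H^8; the "size optimization" lea below
# presumably recentres $Htbl so all displacements fit in a signed byte.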
1128 $code.=<<___ if ($win64);
1129         lea     -0x88(%rsp),%rax
1130 .LSEH_begin_gcm_ghash_avx:
1131         # I can't trust assembler to use specific encoding:-(
1132         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1133         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
1134         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
1135         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
1136         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
1137         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
1138         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
1139         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
1140         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
1141         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
1142         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
1143 ___
1144 $code.=<<___;
1145         vzeroupper
1146
1147         vmovdqu         ($Xip),$Xi              # load $Xi
1148         lea             .L0x1c2_polynomial(%rip),%r10
1149         lea             0x40($Htbl),$Htbl       # size optimization
1150         vmovdqu         .Lbswap_mask(%rip),$bswap
1151         vpshufb         $bswap,$Xi,$Xi
1152         cmp             \$0x80,$len
1153         jb              .Lshort_avx
1154         sub             \$0x80,$len
1155
1156         vmovdqu         0x70($inp),$Ii          # I[7]
1157         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1158         vpshufb         $bswap,$Ii,$Ii
1159         vmovdqu         0x20-0x40($Htbl),$HK
1160
1161         vpunpckhqdq     $Ii,$Ii,$T2
1162          vmovdqu        0x60($inp),$Ij          # I[6]
1163         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1164         vpxor           $Ii,$T2,$T2
1165          vpshufb        $bswap,$Ij,$Ij
1166         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1167          vmovdqu        0x10-0x40($Htbl),$Hkey  # $Hkey^2
1168          vpunpckhqdq    $Ij,$Ij,$T1
1169          vmovdqu        0x50($inp),$Ii          # I[5]
1170         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1171          vpxor          $Ij,$T1,$T1
1172
1173          vpshufb        $bswap,$Ii,$Ii
1174         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1175          vpunpckhqdq    $Ii,$Ii,$T2
1176         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1177          vmovdqu        0x30-0x40($Htbl),$Hkey  # $Hkey^3
1178          vpxor          $Ii,$T2,$T2
1179          vmovdqu        0x40($inp),$Ij          # I[4]
1180         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1181          vmovdqu        0x50-0x40($Htbl),$HK
1182
1183          vpshufb        $bswap,$Ij,$Ij
1184         vpxor           $Xlo,$Zlo,$Zlo
1185         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1186         vpxor           $Xhi,$Zhi,$Zhi
1187          vpunpckhqdq    $Ij,$Ij,$T1
1188         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1189          vmovdqu        0x40-0x40($Htbl),$Hkey  # $Hkey^4
1190         vpxor           $Xmi,$Zmi,$Zmi
1191         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1192          vpxor          $Ij,$T1,$T1
1193
1194          vmovdqu        0x30($inp),$Ii          # I[3]
1195         vpxor           $Zlo,$Xlo,$Xlo
1196         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1197         vpxor           $Zhi,$Xhi,$Xhi
1198          vpshufb        $bswap,$Ii,$Ii
1199         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1200          vmovdqu        0x60-0x40($Htbl),$Hkey  # $Hkey^5
1201         vpxor           $Zmi,$Xmi,$Xmi
1202          vpunpckhqdq    $Ii,$Ii,$T2
1203         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1204          vmovdqu        0x80-0x40($Htbl),$HK
1205          vpxor          $Ii,$T2,$T2
1206
1207          vmovdqu        0x20($inp),$Ij          # I[2]
1208         vpxor           $Xlo,$Zlo,$Zlo
1209         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1210         vpxor           $Xhi,$Zhi,$Zhi
1211          vpshufb        $bswap,$Ij,$Ij
1212         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1213          vmovdqu        0x70-0x40($Htbl),$Hkey  # $Hkey^6
1214         vpxor           $Xmi,$Zmi,$Zmi
1215          vpunpckhqdq    $Ij,$Ij,$T1
1216         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1217          vpxor          $Ij,$T1,$T1
1218
1219          vmovdqu        0x10($inp),$Ii          # I[1]
1220         vpxor           $Zlo,$Xlo,$Xlo
1221         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1222         vpxor           $Zhi,$Xhi,$Xhi
1223          vpshufb        $bswap,$Ii,$Ii
1224         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1225          vmovdqu        0x90-0x40($Htbl),$Hkey  # $Hkey^7
1226         vpxor           $Zmi,$Xmi,$Xmi
1227          vpunpckhqdq    $Ii,$Ii,$T2
1228         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1229          vmovdqu        0xb0-0x40($Htbl),$HK
1230          vpxor          $Ii,$T2,$T2
1231
1232          vmovdqu        ($inp),$Ij              # I[0]
1233         vpxor           $Xlo,$Zlo,$Zlo
1234         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1235         vpxor           $Xhi,$Zhi,$Zhi
1236          vpshufb        $bswap,$Ij,$Ij
1237         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1238          vmovdqu        0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1239         vpxor           $Xmi,$Zmi,$Zmi
1240         vpclmulqdq      \$0x10,$HK,$T2,$Xmi
1241
1242         lea             0x80($inp),$inp
1243         cmp             \$0x80,$len
1244         jb              .Ltail_avx
1245
1246         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1247         sub             \$0x80,$len
1248         jmp             .Loop8x_avx
1249
1250 .align  32
1251 .Loop8x_avx:
1252         vpunpckhqdq     $Ij,$Ij,$T1
1253          vmovdqu        0x70($inp),$Ii          # I[7]
1254         vpxor           $Xlo,$Zlo,$Zlo
1255         vpxor           $Ij,$T1,$T1
1256         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xi
1257          vpshufb        $bswap,$Ii,$Ii
1258         vpxor           $Xhi,$Zhi,$Zhi
1259         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xo
1260          vmovdqu        0x00-0x40($Htbl),$Hkey  # $Hkey^1
1261          vpunpckhqdq    $Ii,$Ii,$T2
1262         vpxor           $Xmi,$Zmi,$Zmi
1263         vpclmulqdq      \$0x00,$HK,$T1,$Tred
1264          vmovdqu        0x20-0x40($Htbl),$HK
1265          vpxor          $Ii,$T2,$T2
1266
1267           vmovdqu       0x60($inp),$Ij          # I[6]
1268          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1269         vpxor           $Zlo,$Xi,$Xi            # collect result
1270           vpshufb       $bswap,$Ij,$Ij
1271          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1272         vxorps          $Zhi,$Xo,$Xo
1273           vmovdqu       0x10-0x40($Htbl),$Hkey  # $Hkey^2
1274          vpunpckhqdq    $Ij,$Ij,$T1
1275          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1276         vpxor           $Zmi,$Tred,$Tred
1277          vxorps         $Ij,$T1,$T1
1278
1279           vmovdqu       0x50($inp),$Ii          # I[5]
1280         vpxor           $Xi,$Tred,$Tred         # aggregated Karatsuba post-processing
1281          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1282         vpxor           $Xo,$Tred,$Tred
1283         vpslldq         \$8,$Tred,$T2
1284          vpxor          $Xlo,$Zlo,$Zlo
1285          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1286         vpsrldq         \$8,$Tred,$Tred
1287         vpxor           $T2, $Xi, $Xi
1288           vmovdqu       0x30-0x40($Htbl),$Hkey  # $Hkey^3
1289           vpshufb       $bswap,$Ii,$Ii
1290         vxorps          $Tred,$Xo, $Xo
1291          vpxor          $Xhi,$Zhi,$Zhi
1292          vpunpckhqdq    $Ii,$Ii,$T2
1293          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1294           vmovdqu       0x50-0x40($Htbl),$HK
1295          vpxor          $Ii,$T2,$T2
1296          vpxor          $Xmi,$Zmi,$Zmi
1297
1298           vmovdqu       0x40($inp),$Ij          # I[4]
1299         vpalignr        \$8,$Xi,$Xi,$Tred       # 1st phase
1300          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1301           vpshufb       $bswap,$Ij,$Ij
1302          vpxor          $Zlo,$Xlo,$Xlo
1303          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1304           vmovdqu       0x40-0x40($Htbl),$Hkey  # $Hkey^4
1305          vpunpckhqdq    $Ij,$Ij,$T1
1306          vpxor          $Zhi,$Xhi,$Xhi
1307          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1308          vxorps         $Ij,$T1,$T1
1309          vpxor          $Zmi,$Xmi,$Xmi
1310
1311           vmovdqu       0x30($inp),$Ii          # I[3]
1312         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1313          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1314           vpshufb       $bswap,$Ii,$Ii
1315          vpxor          $Xlo,$Zlo,$Zlo
1316          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1317           vmovdqu       0x60-0x40($Htbl),$Hkey  # $Hkey^5
1318          vpunpckhqdq    $Ii,$Ii,$T2
1319          vpxor          $Xhi,$Zhi,$Zhi
1320          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1321           vmovdqu       0x80-0x40($Htbl),$HK
1322          vpxor          $Ii,$T2,$T2
1323          vpxor          $Xmi,$Zmi,$Zmi
1324
1325           vmovdqu       0x20($inp),$Ij          # I[2]
1326          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1327           vpshufb       $bswap,$Ij,$Ij
1328          vpxor          $Zlo,$Xlo,$Xlo
1329          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1330           vmovdqu       0x70-0x40($Htbl),$Hkey  # $Hkey^6
1331          vpunpckhqdq    $Ij,$Ij,$T1
1332          vpxor          $Zhi,$Xhi,$Xhi
1333          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1334          vpxor          $Ij,$T1,$T1
1335          vpxor          $Zmi,$Xmi,$Xmi
1336         vxorps          $Tred,$Xi,$Xi
1337
1338           vmovdqu       0x10($inp),$Ii          # I[1]
1339         vpalignr        \$8,$Xi,$Xi,$Tred       # 2nd phase
1340          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1341           vpshufb       $bswap,$Ii,$Ii
1342          vpxor          $Xlo,$Zlo,$Zlo
1343          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1344           vmovdqu       0x90-0x40($Htbl),$Hkey  # $Hkey^7
1345         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1346         vxorps          $Xo,$Tred,$Tred
1347          vpunpckhqdq    $Ii,$Ii,$T2
1348          vpxor          $Xhi,$Zhi,$Zhi
1349          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1350           vmovdqu       0xb0-0x40($Htbl),$HK
1351          vpxor          $Ii,$T2,$T2
1352          vpxor          $Xmi,$Zmi,$Zmi
1353
1354           vmovdqu       ($inp),$Ij              # I[0]
1355          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1356           vpshufb       $bswap,$Ij,$Ij
1357          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1358           vmovdqu       0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1359         vpxor           $Tred,$Ij,$Ij
1360          vpclmulqdq     \$0x10,$HK,  $T2,$Xmi
1361         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1362
1363         lea             0x80($inp),$inp
1364         sub             \$0x80,$len
1365         jnc             .Loop8x_avx
1366
1367         add             \$0x80,$len
1368         jmp             .Ltail_no_xor_avx
1369
1370 .align  32
1371 .Lshort_avx:
1372         vmovdqu         -0x10($inp,$len),$Ii    # very last word
1373         lea             ($inp,$len),$inp
1374         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1375         vmovdqu         0x20-0x40($Htbl),$HK
1376         vpshufb         $bswap,$Ii,$Ij
1377
1378         vmovdqa         $Xlo,$Zlo               # subtle way to zero $Zlo,
1379         vmovdqa         $Xhi,$Zhi               # $Zhi and
1380         vmovdqa         $Xmi,$Zmi               # $Zmi
1381         sub             \$0x10,$len
1382         jz              .Ltail_avx
1383
1384         vpunpckhqdq     $Ij,$Ij,$T1
1385         vpxor           $Xlo,$Zlo,$Zlo
1386         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1387         vpxor           $Ij,$T1,$T1
1388          vmovdqu        -0x20($inp),$Ii
1389         vpxor           $Xhi,$Zhi,$Zhi
1390         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1391         vmovdqu         0x10-0x40($Htbl),$Hkey  # $Hkey^2
1392          vpshufb        $bswap,$Ii,$Ij
1393         vpxor           $Xmi,$Zmi,$Zmi
1394         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1395         vpsrldq         \$8,$HK,$HK
1396         sub             \$0x10,$len
1397         jz              .Ltail_avx
1398
1399         vpunpckhqdq     $Ij,$Ij,$T1
1400         vpxor           $Xlo,$Zlo,$Zlo
1401         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1402         vpxor           $Ij,$T1,$T1
1403          vmovdqu        -0x30($inp),$Ii
1404         vpxor           $Xhi,$Zhi,$Zhi
1405         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1406         vmovdqu         0x30-0x40($Htbl),$Hkey  # $Hkey^3
1407          vpshufb        $bswap,$Ii,$Ij
1408         vpxor           $Xmi,$Zmi,$Zmi
1409         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1410         vmovdqu         0x50-0x40($Htbl),$HK
1411         sub             \$0x10,$len
1412         jz              .Ltail_avx
1413
1414         vpunpckhqdq     $Ij,$Ij,$T1
1415         vpxor           $Xlo,$Zlo,$Zlo
1416         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1417         vpxor           $Ij,$T1,$T1
1418          vmovdqu        -0x40($inp),$Ii
1419         vpxor           $Xhi,$Zhi,$Zhi
1420         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1421         vmovdqu         0x40-0x40($Htbl),$Hkey  # $Hkey^4
1422          vpshufb        $bswap,$Ii,$Ij
1423         vpxor           $Xmi,$Zmi,$Zmi
1424         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1425         vpsrldq         \$8,$HK,$HK
1426         sub             \$0x10,$len
1427         jz              .Ltail_avx
1428
1429         vpunpckhqdq     $Ij,$Ij,$T1
1430         vpxor           $Xlo,$Zlo,$Zlo
1431         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1432         vpxor           $Ij,$T1,$T1
1433          vmovdqu        -0x50($inp),$Ii
1434         vpxor           $Xhi,$Zhi,$Zhi
1435         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1436         vmovdqu         0x60-0x40($Htbl),$Hkey  # $Hkey^5
1437          vpshufb        $bswap,$Ii,$Ij
1438         vpxor           $Xmi,$Zmi,$Zmi
1439         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1440         vmovdqu         0x80-0x40($Htbl),$HK
1441         sub             \$0x10,$len
1442         jz              .Ltail_avx
1443
1444         vpunpckhqdq     $Ij,$Ij,$T1
1445         vpxor           $Xlo,$Zlo,$Zlo
1446         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1447         vpxor           $Ij,$T1,$T1
1448          vmovdqu        -0x60($inp),$Ii
1449         vpxor           $Xhi,$Zhi,$Zhi
1450         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1451         vmovdqu         0x70-0x40($Htbl),$Hkey  # $Hkey^6
1452          vpshufb        $bswap,$Ii,$Ij
1453         vpxor           $Xmi,$Zmi,$Zmi
1454         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1455         vpsrldq         \$8,$HK,$HK
1456         sub             \$0x10,$len
1457         jz              .Ltail_avx
1458
1459         vpunpckhqdq     $Ij,$Ij,$T1
1460         vpxor           $Xlo,$Zlo,$Zlo
1461         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1462         vpxor           $Ij,$T1,$T1
1463          vmovdqu        -0x70($inp),$Ii
1464         vpxor           $Xhi,$Zhi,$Zhi
1465         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1466         vmovdqu         0x90-0x40($Htbl),$Hkey  # $Hkey^7
1467          vpshufb        $bswap,$Ii,$Ij
1468         vpxor           $Xmi,$Zmi,$Zmi
1469         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1470         vmovq           0xb8-0x40($Htbl),$HK
1471         sub             \$0x10,$len
1472         jmp             .Ltail_avx
1473
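# Tail: fold the running hash value Xi into the last pending block (the
# .Ltail_no_xor_avx entry is used when the hash has already been folded
# in), perform the final multiplication, then merge the three accumulated
# Karatsuba halves and reduce.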
1474 .align  32
1475 .Ltail_avx:
1476         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1477 .Ltail_no_xor_avx:
1478         vpunpckhqdq     $Ij,$Ij,$T1
1479         vpxor           $Xlo,$Zlo,$Zlo
1480         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1481         vpxor           $Ij,$T1,$T1
1482         vpxor           $Xhi,$Zhi,$Zhi
1483         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1484         vpxor           $Xmi,$Zmi,$Zmi
1485         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1486
1487         vmovdqu         (%r10),$Tred
1488
1489         vpxor           $Xlo,$Zlo,$Xi
1490         vpxor           $Xhi,$Zhi,$Xo
1491         vpxor           $Xmi,$Zmi,$Zmi
1492
1493         vpxor           $Xi, $Zmi,$Zmi          # aggregated Karatsuba post-processing
1494         vpxor           $Xo, $Zmi,$Zmi
1495         vpslldq         \$8, $Zmi,$T2
1496         vpsrldq         \$8, $Zmi,$Zmi
1497         vpxor           $T2, $Xi, $Xi
1498         vpxor           $Zmi,$Xo, $Xo
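# Aggregated Karatsuba post-processing: xoring the low and high sums into
# Zmi recovers the true middle term, which is then split on the 64-bit
# boundary and folded into the 256-bit product Xo:Xi.  See the
# commented-out Perl sketch of the underlying identity a few lines below.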
1499
1500         vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 1st phase
1501         vpalignr        \$8,$Xi,$Xi,$Xi
1502         vpxor           $T2,$Xi,$Xi
1503
1504         vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 2nd phase
1505         vpalignr        \$8,$Xi,$Xi,$Xi
1506         vpxor           $Xo,$Xi,$Xi
1507         vpxor           $T2,$Xi,$Xi
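# Two-phase reduction with the 0x1c2 constant held in Tred (loaded above
# from the constant pool): each phase multiplies the low half of Xi by the
# reduction constant and realigns the halves with vpalignr; the second
# phase also xors in the high half Xo, leaving the 128-bit remainder of
# the 256-bit product modulo the GHASH polynomial.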
1508
1509         cmp             \$0,$len
1510         jne             .Lshort_avx
1511
1512         vpshufb         $bswap,$Xi,$Xi
1513         vmovdqu         $Xi,($Xip)
1514         vzeroupper
1515 ___
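# A minimal, commented-out Perl sketch (illustrative only, not used by the
# build) of the carry-less Karatsuba identity the tail above relies on;
# operands are kept to 32 bits so the products fit in a native integer:
#
#	sub clmul {				# carry-less (xor) multiply
#	    my ($x, $y) = @_;
#	    my $r = 0;
#	    for my $i (0 .. 31) {
#	        $r ^= $y << $i if ($x >> $i) & 1;
#	    }
#	    return $r;
#	}
#	my ($a0,$a1,$b0,$b1) = (0x12345678,0x9abcdef0,0x0fedcba9,0x87654321);
#	my $lo  = clmul($a0,$b0);			# low product
#	my $hi  = clmul($a1,$b1);			# high product
#	my $mid = clmul($a0^$a1,$b0^$b1) ^ $lo ^ $hi;	# true middle term
#	# conceptually, product = hi*x^64 xor mid*x^32 xor lo (polynomial
#	# shifts); the assembly performs the same merge on the 64-bit halves
#	# of its 128-bit operands with vpslldq/vpsrldq/vpxor.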
1516 $code.=<<___ if ($win64);
1517         movaps  (%rsp),%xmm6
1518         movaps  0x10(%rsp),%xmm7
1519         movaps  0x20(%rsp),%xmm8
1520         movaps  0x30(%rsp),%xmm9
1521         movaps  0x40(%rsp),%xmm10
1522         movaps  0x50(%rsp),%xmm11
1523         movaps  0x60(%rsp),%xmm12
1524         movaps  0x70(%rsp),%xmm13
1525         movaps  0x80(%rsp),%xmm14
1526         movaps  0x90(%rsp),%xmm15
1527         lea     0xa8(%rsp),%rsp
1528 .LSEH_end_gcm_ghash_avx:
1529 ___
1530 $code.=<<___;
1531         ret
1532 .size   gcm_ghash_avx,.-gcm_ghash_avx
1533 ___
1534 } else {
1535 $code.=<<___;
1536         jmp     .L_ghash_clmul
1537 .size   gcm_ghash_avx,.-gcm_ghash_avx
1538 ___
1539 }
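# Without AVX support in the assembler ($avx is 0), gcm_ghash_avx is emitted
# as a bare tail-jump into the CLMUL implementation.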
1540 \f
1541 $code.=<<___;
1542 .align  64
1543 .Lbswap_mask:
1544         .byte   15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
1545 .L0x1c2_polynomial:
1546         .byte   1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
1547 .L7_mask:
1548         .long   7,0,7,0
1549 .L7_mask_poly:
1550         .long   7,0,`0xE1<<1`,0
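# .Lbswap_mask reverses the byte order of a 16-byte block for the CLMUL/AVX
# paths.  .L0x1c2_polynomial carries their reduction constant: the low terms
# of the GHASH polynomial, x^7+x^2+x+1, bit-reflected and shifted by one
# give 0x1c2, whose 0xc2 byte sits in the uppermost byte of the vector.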
1551 .align  64
1552 .type   .Lrem_4bit,\@object
1553 .Lrem_4bit:
1554         .long   0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
1555         .long   0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
1556         .long   0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
1557         .long   0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
1558 .type   .Lrem_8bit,\@object
1559 .Lrem_8bit:
1560         .value  0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
1561         .value  0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
1562         .value  0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
1563         .value  0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
1564         .value  0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
1565         .value  0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
1566         .value  0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
1567         .value  0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
1568         .value  0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
1569         .value  0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
1570         .value  0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
1571         .value  0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
1572         .value  0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
1573         .value  0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
1574         .value  0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
1575         .value  0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
1576         .value  0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
1577         .value  0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
1578         .value  0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
1579         .value  0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
1580         .value  0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
1581         .value  0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
1582         .value  0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
1583         .value  0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
1584         .value  0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
1585         .value  0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
1586         .value  0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
1587         .value  0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
1588         .value  0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
1589         .value  0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
1590         .value  0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
1591         .value  0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
1592
1593 .asciz  "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1594 .align  64
1595 ___
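# The .Lrem_4bit/.Lrem_8bit tables above are pre-computed reduction
# remainders: entry n is the carry-less product of n and 0x1c2 (the same
# reflected constant as above).  A commented-out Perl sketch, illustrative
# only, that regenerates them:
#
#	sub clmul16 {			# carry-less multiply of small operands
#	    my ($x, $y) = @_;
#	    my $r = 0;
#	    for my $i (0 .. 15) {
#	        $r ^= $y << $i if ($x >> $i) & 1;
#	    }
#	    return $r;
#	}
#	printf "0x%04X\n", clmul16($_, 0x1C2) for 0 .. 255;	# .Lrem_8bit entries
#	# .Lrem_4bit holds the same products for a 4-bit index, shifted left
#	# by 4 and stored as ".long 0, value<<16" in each 8-byte slot.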
1596 \f
1597 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1598 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
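# se_handler is registered for the 4-bit routines through the .pdata/.xdata
# tables below.  When an exception is dispatched while RIP lies between a
# routine's recorded prologue and epilogue labels, it recovers the
# non-volatile registers saved by the prologue and the caller's stack
# pointer from the CONTEXT record, then hands the remainder of the unwind
# to RtlVirtualUnwind.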
1599 if ($win64) {
1600 $rec="%rcx";
1601 $frame="%rdx";
1602 $context="%r8";
1603 $disp="%r9";
1604
1605 $code.=<<___;
1606 .extern __imp_RtlVirtualUnwind
1607 .type   se_handler,\@abi-omnipotent
1608 .align  16
1609 se_handler:
1610         push    %rsi
1611         push    %rdi
1612         push    %rbx
1613         push    %rbp
1614         push    %r12
1615         push    %r13
1616         push    %r14
1617         push    %r15
1618         pushfq
1619         sub     \$64,%rsp
1620
1621         mov     120($context),%rax      # pull context->Rax
1622         mov     248($context),%rbx      # pull context->Rip
1623
1624         mov     8($disp),%rsi           # disp->ImageBase
1625         mov     56($disp),%r11          # disp->HandlerData
1626
1627         mov     0(%r11),%r10d           # HandlerData[0]
1628         lea     (%rsi,%r10),%r10        # prologue label
1629         cmp     %r10,%rbx               # context->Rip<prologue label
1630         jb      .Lin_prologue
1631
1632         mov     152($context),%rax      # pull context->Rsp
1633
1634         mov     4(%r11),%r10d           # HandlerData[1]
1635         lea     (%rsi,%r10),%r10        # epilogue label
1636         cmp     %r10,%rbx               # context->Rip>=epilogue label
1637         jae     .Lin_prologue
1638
1639         lea     24(%rax),%rax           # adjust "rsp"
1640
1641         mov     -8(%rax),%rbx
1642         mov     -16(%rax),%rbp
1643         mov     -24(%rax),%r12
1644         mov     %rbx,144($context)      # restore context->Rbx
1645         mov     %rbp,160($context)      # restore context->Rbp
1646         mov     %r12,216($context)      # restore context->R12
1647
1648 .Lin_prologue:
1649         mov     8(%rax),%rdi
1650         mov     16(%rax),%rsi
1651         mov     %rax,152($context)      # restore context->Rsp
1652         mov     %rsi,168($context)      # restore context->Rsi
1653         mov     %rdi,176($context)      # restore context->Rdi
1654
1655         mov     40($disp),%rdi          # disp->ContextRecord
1656         mov     $context,%rsi           # context
1657         mov     \$`1232/8`,%ecx         # sizeof(CONTEXT) in qwords
1658         .long   0xa548f3fc              # cld; rep movsq
1659
1660         mov     $disp,%rsi
1661         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1662         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1663         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1664         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1665         mov     40(%rsi),%r10           # disp->ContextRecord
1666         lea     56(%rsi),%r11           # &disp->HandlerData
1667         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1668         mov     %r10,32(%rsp)           # arg5
1669         mov     %r11,40(%rsp)           # arg6
1670         mov     %r12,48(%rsp)           # arg7
1671         mov     %rcx,56(%rsp)           # arg8, (NULL)
1672         call    *__imp_RtlVirtualUnwind(%rip)
1673
1674         mov     \$1,%eax                # ExceptionContinueSearch
1675         add     \$64,%rsp
1676         popfq
1677         pop     %r15
1678         pop     %r14
1679         pop     %r13
1680         pop     %r12
1681         pop     %rbp
1682         pop     %rbx
1683         pop     %rdi
1684         pop     %rsi
1685         ret
1686 .size   se_handler,.-se_handler
1687
1688 .section        .pdata
1689 .align  4
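# One RUNTIME_FUNCTION entry per routine: begin and end RVAs of the code
# plus the RVA of its unwind descriptor in .xdata below.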
1690         .rva    .LSEH_begin_gcm_gmult_4bit
1691         .rva    .LSEH_end_gcm_gmult_4bit
1692         .rva    .LSEH_info_gcm_gmult_4bit
1693
1694         .rva    .LSEH_begin_gcm_ghash_4bit
1695         .rva    .LSEH_end_gcm_ghash_4bit
1696         .rva    .LSEH_info_gcm_ghash_4bit
1697
1698         .rva    .LSEH_begin_gcm_init_clmul
1699         .rva    .LSEH_end_gcm_init_clmul
1700         .rva    .LSEH_info_gcm_init_clmul
1701
1702         .rva    .LSEH_begin_gcm_ghash_clmul
1703         .rva    .LSEH_end_gcm_ghash_clmul
1704         .rva    .LSEH_info_gcm_ghash_clmul
1705 ___
1706 $code.=<<___    if ($avx);
1707         .rva    .LSEH_begin_gcm_init_avx
1708         .rva    .LSEH_end_gcm_init_avx
1709         .rva    .LSEH_info_gcm_init_clmul
1710
1711         .rva    .LSEH_begin_gcm_ghash_avx
1712         .rva    .LSEH_end_gcm_ghash_avx
1713         .rva    .LSEH_info_gcm_ghash_clmul
1714 ___
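# The AVX routines point at the CLMUL unwind descriptors: their win64
# prologues lay out the stack identically (cf. the xmm6-xmm15 restore
# sequence in gcm_ghash_avx above).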
1715 $code.=<<___;
1716 .section        .xdata
1717 .align  8
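# Two flavours of unwind descriptor follow: the 4-bit routines use the
# language-specific handler above (leading byte 9 = version 1 plus
# UNW_FLAG_EHANDLER) with HandlerData giving their prologue and epilogue
# labels, while the CLMUL/AVX routines describe their prologues directly
# with raw unwind codes (version byte 0x01, prologue size, code count,
# then one save or alloc code per line, decoded in the comments).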
1718 .LSEH_info_gcm_gmult_4bit:
1719         .byte   9,0,0,0
1720         .rva    se_handler
1721         .rva    .Lgmult_prologue,.Lgmult_epilogue       # HandlerData
1722 .LSEH_info_gcm_ghash_4bit:
1723         .byte   9,0,0,0
1724         .rva    se_handler
1725         .rva    .Lghash_prologue,.Lghash_epilogue       # HandlerData
1726 .LSEH_info_gcm_init_clmul:
1727         .byte   0x01,0x08,0x03,0x00
1728         .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
1729         .byte   0x04,0x22,0x00,0x00     #sub    rsp,0x18
1730 .LSEH_info_gcm_ghash_clmul:
1731         .byte   0x01,0x33,0x16,0x00
1732         .byte   0x33,0xf8,0x09,0x00     #movaps 0x90(rsp),xmm15
1733         .byte   0x2e,0xe8,0x08,0x00     #movaps 0x80(rsp),xmm14
1734         .byte   0x29,0xd8,0x07,0x00     #movaps 0x70(rsp),xmm13
1735         .byte   0x24,0xc8,0x06,0x00     #movaps 0x60(rsp),xmm12
1736         .byte   0x1f,0xb8,0x05,0x00     #movaps 0x50(rsp),xmm11
1737         .byte   0x1a,0xa8,0x04,0x00     #movaps 0x40(rsp),xmm10
1738         .byte   0x15,0x98,0x03,0x00     #movaps 0x30(rsp),xmm9
1739         .byte   0x10,0x88,0x02,0x00     #movaps 0x20(rsp),xmm8
1740         .byte   0x0c,0x78,0x01,0x00     #movaps 0x10(rsp),xmm7
1741         .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
1742         .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
1743 ___
1744 }
1745 \f
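# Evaluate the backtick-quoted arithmetic above (e.g. 0xE1<<1) and emit the
# assembly.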
1746 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
1747
1748 print $code;
1749
1750 close STDOUT;