1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # March, June 2010
11 #
12 # The module implements the "4-bit" GCM GHASH function and underlying
13 # single multiplication operation in GF(2^128). "4-bit" means that
14 # it uses a 256-byte per-key table [+128 bytes shared table]. The GHASH
15 # function features a so-called "528B" variant utilizing an additional
16 # 256+16 bytes of per-key storage [+512 bytes shared table].
17 # Performance results are for this streamed GHASH subroutine and are
18 # expressed in cycles per processed byte; less is better:
19 #
20 #               gcc 3.4.x(*)    assembler
21 #
22 # P4            28.6            14.0            +100%
23 # Opteron       19.3            7.7             +150%
24 # Core2         17.8            8.1(**)         +120%
25 # Atom          31.6            16.8            +88%
26 # VIA Nano      21.8            10.1            +115%
27 #
28 # (*)   comparison is not completely fair, because C results are
29 #       for vanilla "256B" implementation, while assembler results
30 #       are for "528B";-)
31 # (**)  it's a mystery [to me] why the Core2 result is not the same
32 #       as for Opteron;
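#
# For reference, the operation being accelerated is the GCM "gmult"
# multiplication X·H in GF(2^128) with GCM's bit-reflected conventions.
# Below is a minimal bit-at-a-time sketch in plain Perl; it is NOT used
# by this module, and it assumes a 64-bit perl, with each 128-bit field
# element held as a (hi,lo) pair of big-endian 64-bit halves. The
# table-driven code in this file computes the same result 4 bits at a
# time [or 8 bits at a time in the "528B" variant].
#
# sub gf128_mul_ref {
#     my ($xh,$xl,$hh,$hl) = @_;            # X and H as 64-bit halves
#     my ($zh,$zl) = (0,0);                 # Z = 0
#     my ($vh,$vl) = ($hh,$hl);             # V = H
#     for my $i (0..127) {
#         my $bit = $i<64 ? ($xh>>(63-$i))&1 : ($xl>>(127-$i))&1;
#         if ($bit) { $zh ^= $vh; $zl ^= $vl; }   # Z ^= V if X[i] set
#         my $carry = $vl & 1;                    # bit shifted out of V
#         $vl = ($vl>>1) | (($vh&1)<<63);         # V >>= 1
#         $vh =  $vh>>1;
#         $vh ^= 0xe100000000000000 if ($carry);  # reduce modulo
#     }                                           # x^128+x^7+x^2+x+1
#     return ($zh,$zl);                           # Z = X·H
# }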
33
34 # May 2010
35 #
36 # Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
37 # See ghash-x86.pl for background information and details about coding
38 # techniques.
39 #
40 # Special thanks to David Woodhouse <dwmw2@infradead.org> for
41 # providing access to a Westmere-based system on behalf of Intel
42 # Open Source Technology Centre.
43
44 # December 2012
45 #
46 # Overhaul: aggregate Karatsuba post-processing, improve ILP in
47 # reduction_alg9, increase reduction aggregate factor to 4x. As for
48 # the latter: ghash-x86.pl argues that it makes little sense to
49 # increase the aggregate factor. Then why increase it here? The
50 # critical path consists of 3 independent pclmulqdq instructions,
51 # Karatsuba post-processing and reduction. "On top" of this we lay
52 # down aggregated multiplication operations, triplets of independent
53 # pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
54 # little sense to aggregate more multiplications than it takes to
55 # perform the remaining non-multiplication operations. 4x is a
56 # near-optimal coefficient for contemporary Intel CPUs (hence the
57 # modest improvement), but not for Bulldozer, whose logical SIMD
58 # operations are twice as slow as Intel's, so that its critical path
59 # is longer. A CPU with a higher pclmulqdq issue rate would also
60 # benefit from a higher aggregate factor...
61 #
62 # Westmere      1.76(+14%)
63 # Sandy Bridge  1.79(+9%)
64 # Ivy Bridge    1.79(+8%)
65 # Haswell       0.55(+93%) (if system doesn't support AVX)
66 # Bulldozer     1.52(+25%)
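#
# For orientation: each 128x128-bit carry-less multiplication below is
# done Karatsuba-style with three pclmulqdq's. Writing X=Xh:Xl and
# H=Hh:Hl as 64-bit halves, with (x) denoting carry-less multiplication:
#
#       lo  = Xl(x)Hl                           [pclmulqdq imm 0x00]
#       hi  = Xh(x)Hh                           [pclmulqdq imm 0x11]
#       mid = (Xl^Xh)(x)(Hl^Hh)                 [third pclmulqdq]
#       X(x)H = hi:0 ^ (mid^lo^hi shifted up 64 bits) ^ 0:lo
#                                               [256 bits, pre-reduction]
#
# "Aggregated Karatsuba post-processing" means that with 4x aggregation
# the lo, hi and mid terms of all four products are XORed together
# first, and the ^lo^hi fold plus the 64-bit shift/splice are performed
# only once on the sums rather than once per product.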
67
68 # March 2013
69 #
70 # ... The 8x aggregate factor AVX code path uses the reduction algorithm
71 # suggested by Shay Gueron[1]. Even though contemporary AVX-capable
72 # CPUs such as Sandy and Ivy Bridge can execute it, the code performs
73 # sub-optimally in comparison to the above-mentioned version. But
74 # thanks to Ilya Albrekht and Max Locktyukhin of Intel Corp. we know
75 # that it performs at 0.41 cycles per byte on a Haswell processor.
76 #
77 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
78
79 $flavour = shift;
80 $output  = shift;
81 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
82
83 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
84
85 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
86 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
87 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
88 die "can't locate x86_64-xlate.pl";
89
90 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
91                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
92         $avx = ($1>=2.19) + ($1>=2.22);
93 }
94
95 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
96             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
97         $avx = ($1>=2.09) + ($1>=2.10);
98 }
99
100 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
101             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
102         $avx = ($1>=10) + ($1>=11);
103 }
104
105 open OUT,"| \"$^X\" $xlate $flavour $output";
106 *STDOUT=*OUT;
107
108 $do4xaggr=1;
109
110 # common register layout
111 $nlo="%rax";
112 $nhi="%rbx";
113 $Zlo="%r8";
114 $Zhi="%r9";
115 $tmp="%r10";
116 $rem_4bit = "%r11";
117
118 $Xi="%rdi";
119 $Htbl="%rsi";
120
121 # per-function register layout
122 $cnt="%rcx";
123 $rem="%rdx";
124
125 sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/     or
126                         $r =~ s/%[er]([sd]i)/%\1l/      or
127                         $r =~ s/%[er](bp)/%\1l/         or
128                         $r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }
129
130 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
131 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
132   my $arg = pop;
133     $arg = "\$$arg" if ($arg*1 eq $arg);
134     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
135 }
136 \f
137 { my $N;
138   sub loop() {
139   my $inp = shift;
140
141         $N++;
142 $code.=<<___;
143         xor     $nlo,$nlo
144         xor     $nhi,$nhi
145         mov     `&LB("$Zlo")`,`&LB("$nlo")`
146         mov     `&LB("$Zlo")`,`&LB("$nhi")`
147         shl     \$4,`&LB("$nlo")`
148         mov     \$14,$cnt
149         mov     8($Htbl,$nlo),$Zlo
150         mov     ($Htbl,$nlo),$Zhi
151         and     \$0xf0,`&LB("$nhi")`
152         mov     $Zlo,$rem
153         jmp     .Loop$N
154
155 .align  16
156 .Loop$N:
157         shr     \$4,$Zlo
158         and     \$0xf,$rem
159         mov     $Zhi,$tmp
160         mov     ($inp,$cnt),`&LB("$nlo")`
161         shr     \$4,$Zhi
162         xor     8($Htbl,$nhi),$Zlo
163         shl     \$60,$tmp
164         xor     ($Htbl,$nhi),$Zhi
165         mov     `&LB("$nlo")`,`&LB("$nhi")`
166         xor     ($rem_4bit,$rem,8),$Zhi
167         mov     $Zlo,$rem
168         shl     \$4,`&LB("$nlo")`
169         xor     $tmp,$Zlo
170         dec     $cnt
171         js      .Lbreak$N
172
173         shr     \$4,$Zlo
174         and     \$0xf,$rem
175         mov     $Zhi,$tmp
176         shr     \$4,$Zhi
177         xor     8($Htbl,$nlo),$Zlo
178         shl     \$60,$tmp
179         xor     ($Htbl,$nlo),$Zhi
180         and     \$0xf0,`&LB("$nhi")`
181         xor     ($rem_4bit,$rem,8),$Zhi
182         mov     $Zlo,$rem
183         xor     $tmp,$Zlo
184         jmp     .Loop$N
185
186 .align  16
187 .Lbreak$N:
188         shr     \$4,$Zlo
189         and     \$0xf,$rem
190         mov     $Zhi,$tmp
191         shr     \$4,$Zhi
192         xor     8($Htbl,$nlo),$Zlo
193         shl     \$60,$tmp
194         xor     ($Htbl,$nlo),$Zhi
195         and     \$0xf0,`&LB("$nhi")`
196         xor     ($rem_4bit,$rem,8),$Zhi
197         mov     $Zlo,$rem
198         xor     $tmp,$Zlo
199
200         shr     \$4,$Zlo
201         and     \$0xf,$rem
202         mov     $Zhi,$tmp
203         shr     \$4,$Zhi
204         xor     8($Htbl,$nhi),$Zlo
205         shl     \$60,$tmp
206         xor     ($Htbl,$nhi),$Zhi
207         xor     $tmp,$Zlo
208         xor     ($rem_4bit,$rem,8),$Zhi
209
210         bswap   $Zlo
211         bswap   $Zhi
212 ___
213 }}
214
215 $code=<<___;
216 .text
217 .extern OPENSSL_ia32cap_P
218
219 .globl  gcm_gmult_4bit
220 .type   gcm_gmult_4bit,\@function,2
221 .align  16
222 gcm_gmult_4bit:
223         push    %rbx
224         push    %rbp            # %rbp and %r12 are pushed exclusively in
225         push    %r12            # order to reuse Win64 exception handler...
226 .Lgmult_prologue:
227
228         movzb   15($Xi),$Zlo
229         lea     .Lrem_4bit(%rip),$rem_4bit
230 ___
231         &loop   ($Xi);
232 $code.=<<___;
233         mov     $Zlo,8($Xi)
234         mov     $Zhi,($Xi)
235
236         mov     16(%rsp),%rbx
237         lea     24(%rsp),%rsp
238 .Lgmult_epilogue:
239         ret
240 .size   gcm_gmult_4bit,.-gcm_gmult_4bit
241 ___
242 \f
243 # per-function register layout
244 $inp="%rdx";
245 $len="%rcx";
246 $rem_8bit=$rem_4bit;
247
248 $code.=<<___;
249 .globl  gcm_ghash_4bit
250 .type   gcm_ghash_4bit,\@function,4
251 .align  16
252 gcm_ghash_4bit:
253         push    %rbx
254         push    %rbp
255         push    %r12
256         push    %r13
257         push    %r14
258         push    %r15
259         sub     \$280,%rsp
260 .Lghash_prologue:
261         mov     $inp,%r14               # reassign couple of args
262         mov     $len,%r15
263 ___
264 { my $inp="%r14";
265   my $dat="%edx";
266   my $len="%r15";
267   my @nhi=("%ebx","%ecx");
268   my @rem=("%r12","%r13");
269   my $Hshr4="%rbp";
270
271         &sub    ($Htbl,-128);           # size optimization
272         &lea    ($Hshr4,"16+128(%rsp)");
273         { my @lo =($nlo,$nhi);
274           my @hi =($Zlo,$Zhi);
275
276           &xor  ($dat,$dat);
277           for ($i=0,$j=-2;$i<18;$i++,$j++) {
278             &mov        ("$j(%rsp)",&LB($dat))          if ($i>1);
279             &or         ($lo[0],$tmp)                   if ($i>1);
280             &mov        (&LB($dat),&LB($lo[1]))         if ($i>0 && $i<17);
281             &shr        ($lo[1],4)                      if ($i>0 && $i<17);
282             &mov        ($tmp,$hi[1])                   if ($i>0 && $i<17);
283             &shr        ($hi[1],4)                      if ($i>0 && $i<17);
284             &mov        ("8*$j($Hshr4)",$hi[0])         if ($i>1);
285             &mov        ($hi[0],"16*$i+0-128($Htbl)")   if ($i<16);
286             &shl        (&LB($dat),4)                   if ($i>0 && $i<17);
287             &mov        ("8*$j-128($Hshr4)",$lo[0])     if ($i>1);
288             &mov        ($lo[0],"16*$i+8-128($Htbl)")   if ($i<16);
289             &shl        ($tmp,60)                       if ($i>0 && $i<17);
290
291             push        (@lo,shift(@lo));
292             push        (@hi,shift(@hi));
293           }
294         }
295         &add    ($Htbl,-128);
296         &mov    ($Zlo,"8($Xi)");
297         &mov    ($Zhi,"0($Xi)");
298         &add    ($len,$inp);            # pointer to the end of data
299         &lea    ($rem_8bit,".Lrem_8bit(%rip)");
300         &jmp    (".Louter_loop");
301
302 $code.=".align  16\n.Louter_loop:\n";
303         &xor    ($Zhi,"($inp)");
304         &mov    ("%rdx","8($inp)");
305         &lea    ($inp,"16($inp)");
306         &xor    ("%rdx",$Zlo);
307         &mov    ("($Xi)",$Zhi);
308         &mov    ("8($Xi)","%rdx");
309         &shr    ("%rdx",32);
310
311         &xor    ($nlo,$nlo);
312         &rol    ($dat,8);
313         &mov    (&LB($nlo),&LB($dat));
314         &movz   ($nhi[0],&LB($dat));
315         &shl    (&LB($nlo),4);
316         &shr    ($nhi[0],4);
317
318         for ($j=11,$i=0;$i<15;$i++) {
319             &rol        ($dat,8);
320             &xor        ($Zlo,"8($Htbl,$nlo)")                  if ($i>0);
321             &xor        ($Zhi,"($Htbl,$nlo)")                   if ($i>0);
322             &mov        ($Zlo,"8($Htbl,$nlo)")                  if ($i==0);
323             &mov        ($Zhi,"($Htbl,$nlo)")                   if ($i==0);
324
325             &mov        (&LB($nlo),&LB($dat));
326             &xor        ($Zlo,$tmp)                             if ($i>0);
327             &movzw      ($rem[1],"($rem_8bit,$rem[1],2)")       if ($i>0);
328
329             &movz       ($nhi[1],&LB($dat));
330             &shl        (&LB($nlo),4);
331             &movzb      ($rem[0],"(%rsp,$nhi[0])");
332
333             &shr        ($nhi[1],4)                             if ($i<14);
334             &and        ($nhi[1],0xf0)                          if ($i==14);
335             &shl        ($rem[1],48)                            if ($i>0);
336             &xor        ($rem[0],$Zlo);
337
338             &mov        ($tmp,$Zhi);
339             &xor        ($Zhi,$rem[1])                          if ($i>0);
340             &shr        ($Zlo,8);
341
342             &movz       ($rem[0],&LB($rem[0]));
343             &mov        ($dat,"$j($Xi)")                        if (--$j%4==0);
344             &shr        ($Zhi,8);
345
346             &xor        ($Zlo,"-128($Hshr4,$nhi[0],8)");
347             &shl        ($tmp,56);
348             &xor        ($Zhi,"($Hshr4,$nhi[0],8)");
349
350             unshift     (@nhi,pop(@nhi));               # "rotate" registers
351             unshift     (@rem,pop(@rem));
352         }
353         &movzw  ($rem[1],"($rem_8bit,$rem[1],2)");
354         &xor    ($Zlo,"8($Htbl,$nlo)");
355         &xor    ($Zhi,"($Htbl,$nlo)");
356
357         &shl    ($rem[1],48);
358         &xor    ($Zlo,$tmp);
359
360         &xor    ($Zhi,$rem[1]);
361         &movz   ($rem[0],&LB($Zlo));
362         &shr    ($Zlo,4);
363
364         &mov    ($tmp,$Zhi);
365         &shl    (&LB($rem[0]),4);
366         &shr    ($Zhi,4);
367
368         &xor    ($Zlo,"8($Htbl,$nhi[0])");
369         &movzw  ($rem[0],"($rem_8bit,$rem[0],2)");
370         &shl    ($tmp,60);
371
372         &xor    ($Zhi,"($Htbl,$nhi[0])");
373         &xor    ($Zlo,$tmp);
374         &shl    ($rem[0],48);
375
376         &bswap  ($Zlo);
377         &xor    ($Zhi,$rem[0]);
378
379         &bswap  ($Zhi);
380         &cmp    ($inp,$len);
381         &jb     (".Louter_loop");
382 }
383 $code.=<<___;
384         mov     $Zlo,8($Xi)
385         mov     $Zhi,($Xi)
386
387         lea     280(%rsp),%rsi
388         mov     0(%rsi),%r15
389         mov     8(%rsi),%r14
390         mov     16(%rsi),%r13
391         mov     24(%rsi),%r12
392         mov     32(%rsi),%rbp
393         mov     40(%rsi),%rbx
394         lea     48(%rsi),%rsp
395 .Lghash_epilogue:
396         ret
397 .size   gcm_ghash_4bit,.-gcm_ghash_4bit
398 ___
399 \f
400 ######################################################################
401 # PCLMULQDQ version.
402
403 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
404                 ("%rdi","%rsi","%rdx","%rcx");  # Unix order
405
406 ($Xi,$Xhi)=("%xmm0","%xmm1");   $Hkey="%xmm2";
407 ($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
408
409 sub clmul64x64_T2 {     # minimal register pressure
410 my ($Xhi,$Xi,$Hkey,$HK)=@_;
411
412 if (!defined($HK)) {    $HK = $T2;
413 $code.=<<___;
414         movdqa          $Xi,$Xhi                #
415         pshufd          \$0b01001110,$Xi,$T1
416         pshufd          \$0b01001110,$Hkey,$T2
417         pxor            $Xi,$T1                 #
418         pxor            $Hkey,$T2
419 ___
420 } else {
421 $code.=<<___;
422         movdqa          $Xi,$Xhi                #
423         pshufd          \$0b01001110,$Xi,$T1
424         pxor            $Xi,$T1                 #
425 ___
426 }
427 $code.=<<___;
428         pclmulqdq       \$0x00,$Hkey,$Xi        #######
429         pclmulqdq       \$0x11,$Hkey,$Xhi       #######
430         pclmulqdq       \$0x00,$HK,$T1          #######
431         pxor            $Xi,$T1                 #
432         pxor            $Xhi,$T1                #
433
434         movdqa          $T1,$T2                 #
435         psrldq          \$8,$T1
436         pslldq          \$8,$T2                 #
437         pxor            $T1,$Xhi
438         pxor            $T2,$Xi                 #
439 ___
440 }
441
442 sub reduction_alg9 {    # 17/11 times faster than Intel version
443 my ($Xhi,$Xi) = @_;
444
445 $code.=<<___;
446         # 1st phase
447         movdqa          $Xi,$T2                 #
448         movdqa          $Xi,$T1
449         psllq           \$5,$Xi
450         pxor            $Xi,$T1                 #
451         psllq           \$1,$Xi
452         pxor            $T1,$Xi                 #
453         psllq           \$57,$Xi                #
454         movdqa          $Xi,$T1                 #
455         pslldq          \$8,$Xi
456         psrldq          \$8,$T1                 #       
457         pxor            $T2,$Xi
458         pxor            $T1,$Xhi                #
459
460         # 2nd phase
461         movdqa          $Xi,$T2
462         psrlq           \$1,$Xi
463         pxor            $T2,$Xhi                #
464         pxor            $Xi,$T2
465         psrlq           \$5,$Xi
466         pxor            $T2,$Xi                 #
467         psrlq           \$1,$Xi                 #
468         pxor            $Xhi,$Xi                #
469 ___
470 }
471 \f
472 { my ($Htbl,$Xip)=@_4args;
473   my $HK="%xmm6";
474
475 $code.=<<___;
476 .globl  gcm_init_clmul
477 .type   gcm_init_clmul,\@abi-omnipotent
478 .align  16
479 gcm_init_clmul:
480 .L_init_clmul:
481 ___
482 $code.=<<___ if ($win64);
483 .LSEH_begin_gcm_init_clmul:
484         # I can't trust assembler to use specific encoding:-(
485         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
486         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
487 ___
488 $code.=<<___;
489         movdqu          ($Xip),$Hkey
490         pshufd          \$0b01001110,$Hkey,$Hkey        # dword swap
491
492         # <<1 twist
493         pshufd          \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
494         movdqa          $Hkey,$T1
495         psllq           \$1,$Hkey
496         pxor            $T3,$T3                 #
497         psrlq           \$63,$T1
498         pcmpgtd         $T2,$T3                 # broadcast carry bit
499         pslldq          \$8,$T1
500         por             $T1,$Hkey               # H<<=1
501
502         # magic reduction
503         pand            .L0x1c2_polynomial(%rip),$T3
504         pxor            $T3,$Hkey               # if(carry) H^=0x1c2_polynomial
505
506         # calculate H^2
507         pshufd          \$0b01001110,$Hkey,$HK
508         movdqa          $Hkey,$Xi
509         pxor            $Hkey,$HK
510 ___
511         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);
512         &reduction_alg9 ($Xhi,$Xi);
513 $code.=<<___;
514         pshufd          \$0b01001110,$Hkey,$T1
515         pshufd          \$0b01001110,$Xi,$T2
516         pxor            $Hkey,$T1               # Karatsuba pre-processing
517         movdqu          $Hkey,0x00($Htbl)       # save H
518         pxor            $Xi,$T2                 # Karatsuba pre-processing
519         movdqu          $Xi,0x10($Htbl)         # save H^2
520         palignr         \$8,$T1,$T2             # low part is H.lo^H.hi...
521         movdqu          $T2,0x20($Htbl)         # save Karatsuba "salt"
522 ___
523 if ($do4xaggr) {
524         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^3
525         &reduction_alg9 ($Xhi,$Xi);
526 $code.=<<___;
527         movdqa          $Xi,$T3
528 ___
529         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^4
530         &reduction_alg9 ($Xhi,$Xi);
531 $code.=<<___;
532         pshufd          \$0b01001110,$T3,$T1
533         pshufd          \$0b01001110,$Xi,$T2
534         pxor            $T3,$T1                 # Karatsuba pre-processing
535         movdqu          $T3,0x30($Htbl)         # save H^3
536         pxor            $Xi,$T2                 # Karatsuba pre-processing
537         movdqu          $Xi,0x40($Htbl)         # save H^4
538         palignr         \$8,$T1,$T2             # low part is H^3.lo^H^3.hi...
539         movdqu          $T2,0x50($Htbl)         # save Karatsuba "salt"
540 ___
541 }
542 $code.=<<___ if ($win64);
543         movaps  (%rsp),%xmm6
544         lea     0x18(%rsp),%rsp
545 .LSEH_end_gcm_init_clmul:
546 ___
547 $code.=<<___;
548         ret
549 .size   gcm_init_clmul,.-gcm_init_clmul
550 ___
551 }
552
553 { my ($Xip,$Htbl)=@_4args;
554
555 $code.=<<___;
556 .globl  gcm_gmult_clmul
557 .type   gcm_gmult_clmul,\@abi-omnipotent
558 .align  16
559 gcm_gmult_clmul:
560 .L_gmult_clmul:
561         movdqu          ($Xip),$Xi
562         movdqa          .Lbswap_mask(%rip),$T3
563         movdqu          ($Htbl),$Hkey
564         movdqu          0x20($Htbl),$T2
565         pshufb          $T3,$Xi
566 ___
567         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$T2);
568 $code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
569         # experimental alternative. special thing about it is that there is
570         # no dependency between the two multiplications...
571         mov             \$`0xE1<<1`,%eax
572         mov             \$0xA040608020C0E000,%r10       # ((7..0)·0xE0)&0xff
573         mov             \$0x07,%r11d
574         movq            %rax,$T1
575         movq            %r10,$T2
576         movq            %r11,$T3                # borrow $T3
577         pand            $Xi,$T3
578         pshufb          $T3,$T2                 # ($Xi&7)·0xE0
579         movq            %rax,$T3
580         pclmulqdq       \$0x00,$Xi,$T1          # ·(0xE1<<1)
581         pxor            $Xi,$T2
582         pslldq          \$15,$T2
583         paddd           $T2,$T2                 # <<(64+56+1)
584         pxor            $T2,$Xi
585         pclmulqdq       \$0x01,$T3,$Xi
586         movdqa          .Lbswap_mask(%rip),$T3  # reload $T3
587         psrldq          \$1,$T1
588         pxor            $T1,$Xhi
589         pslldq          \$7,$Xi
590         pxor            $Xhi,$Xi
591 ___
592 $code.=<<___;
593         pshufb          $T3,$Xi
594         movdqu          $Xi,($Xip)
595         ret
596 .size   gcm_gmult_clmul,.-gcm_gmult_clmul
597 ___
598 }
599 \f
600 { my ($Xip,$Htbl,$inp,$len)=@_4args;
601   my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
602   my ($T1,$T2,$T3)=map("%xmm$_",(8..10));
603
604 $code.=<<___;
605 .globl  gcm_ghash_clmul
606 .type   gcm_ghash_clmul,\@abi-omnipotent
607 .align  32
608 gcm_ghash_clmul:
609 .L_ghash_clmul:
610 ___
611 $code.=<<___ if ($win64);
612         lea     -0x88(%rsp),%rax
613 .LSEH_begin_gcm_ghash_clmul:
614         # I can't trust assembler to use specific encoding:-(
615         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
616         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
617         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
618         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
619         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
620         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
621         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
622         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
623         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
624         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
625         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
626 ___
627 $code.=<<___;
628         movdqa          .Lbswap_mask(%rip),$T3
629
630         movdqu          ($Xip),$Xi
631         movdqu          ($Htbl),$Hkey
632         movdqu          0x20($Htbl),$HK
633         pshufb          $T3,$Xi
634
635         sub             \$0x10,$len
636         jz              .Lodd_tail
637
638         movdqu          0x10($Htbl),$Hkey2
639 ___
640 if ($do4xaggr) {
641 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
642
643 $code.=<<___;
644         mov             OPENSSL_ia32cap_P+4(%rip),%eax
645         cmp             \$0x30,$len
646         jb              .Lskip4x
647
648         and             \$`1<<26|1<<22`,%eax    # isolate MOVBE+XSAVE
649         cmp             \$`1<<22`,%eax          # check for MOVBE without XSAVE
650         je              .Lskip4x
651
652         sub             \$0x30,$len
653         mov             \$0xA040608020C0E000,%rax       # ((7..0)·0xE0)&0xff
654         movdqu          0x30($Htbl),$Hkey3
655         movdqu          0x40($Htbl),$Hkey4
656
657         #######
658         # Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
659         #
660         movdqu          0x30($inp),$Xln
661          movdqu         0x20($inp),$Xl
662         pshufb          $T3,$Xln
663          pshufb         $T3,$Xl
664         movdqa          $Xln,$Xhn
665         pshufd          \$0b01001110,$Xln,$Xmn
666         pxor            $Xln,$Xmn
667         pclmulqdq       \$0x00,$Hkey,$Xln
668         pclmulqdq       \$0x11,$Hkey,$Xhn
669         pclmulqdq       \$0x00,$HK,$Xmn
670
671         movdqa          $Xl,$Xh
672         pshufd          \$0b01001110,$Xl,$Xm
673         pxor            $Xl,$Xm
674         pclmulqdq       \$0x00,$Hkey2,$Xl
675         pclmulqdq       \$0x11,$Hkey2,$Xh
676         xorps           $Xl,$Xln
677         pclmulqdq       \$0x10,$HK,$Xm
678         xorps           $Xh,$Xhn
679         movups          0x50($Htbl),$HK
680         xorps           $Xm,$Xmn
681
682         movdqu          0x10($inp),$Xl
683          movdqu         0($inp),$T1
684         pshufb          $T3,$Xl
685          pshufb         $T3,$T1
686         movdqa          $Xl,$Xh
687         pshufd          \$0b01001110,$Xl,$Xm
688          pxor           $T1,$Xi
689         pxor            $Xl,$Xm
690         pclmulqdq       \$0x00,$Hkey3,$Xl
691          movdqa         $Xi,$Xhi
692          pshufd         \$0b01001110,$Xi,$T1
693          pxor           $Xi,$T1
694         pclmulqdq       \$0x11,$Hkey3,$Xh
695         xorps           $Xl,$Xln
696         pclmulqdq       \$0x00,$HK,$Xm
697         xorps           $Xh,$Xhn
698
699         lea     0x40($inp),$inp
700         sub     \$0x40,$len
701         jc      .Ltail4x
702
703         jmp     .Lmod4_loop
704 .align  32
705 .Lmod4_loop:
706         pclmulqdq       \$0x00,$Hkey4,$Xi
707         xorps           $Xm,$Xmn
708          movdqu         0x30($inp),$Xl
709          pshufb         $T3,$Xl
710         pclmulqdq       \$0x11,$Hkey4,$Xhi
711         xorps           $Xln,$Xi
712          movdqu         0x20($inp),$Xln
713          movdqa         $Xl,$Xh
714          pshufd         \$0b01001110,$Xl,$Xm
715         pclmulqdq       \$0x10,$HK,$T1
716         xorps           $Xhn,$Xhi
717          pxor           $Xl,$Xm
718          pshufb         $T3,$Xln
719         movups          0x20($Htbl),$HK
720          pclmulqdq      \$0x00,$Hkey,$Xl
721         xorps           $Xmn,$T1
722          movdqa         $Xln,$Xhn
723          pshufd         \$0b01001110,$Xln,$Xmn
724
725         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
726          pxor           $Xln,$Xmn
727         pxor            $Xhi,$T1                #
728         movdqa          $T1,$T2                 #
729         pslldq          \$8,$T1
730          pclmulqdq      \$0x11,$Hkey,$Xh
731         psrldq          \$8,$T2                 #
732         pxor            $T1,$Xi
733         movdqa          .L7_mask(%rip),$T1
734         pxor            $T2,$Xhi                #
735         movq            %rax,$T2
736
737         pand            $Xi,$T1                 # 1st phase
738         pshufb          $T1,$T2                 #
739          pclmulqdq      \$0x00,$HK,$Xm
740         pxor            $Xi,$T2                 #
741         psllq           \$57,$T2                #
742         movdqa          $T2,$T1                 #
743         pslldq          \$8,$T2
744          pclmulqdq      \$0x00,$Hkey2,$Xln
745         psrldq          \$8,$T1                 #       
746         pxor            $T2,$Xi
747         pxor            $T1,$Xhi                #
748         movdqu          0($inp),$T1
749
750         movdqa          $Xi,$T2                 # 2nd phase
751         psrlq           \$1,$Xi
752          pclmulqdq      \$0x11,$Hkey2,$Xhn
753          xorps          $Xl,$Xln
754          movdqu         0x10($inp),$Xl
755          pshufb         $T3,$Xl
756          pclmulqdq      \$0x10,$HK,$Xmn
757          xorps          $Xh,$Xhn
758          movups         0x50($Htbl),$HK
759         pshufb          $T3,$T1
760         pxor            $T2,$Xhi                #
761         pxor            $Xi,$T2
762         psrlq           \$5,$Xi
763
764          movdqa         $Xl,$Xh
765          pxor           $Xm,$Xmn
766          pshufd         \$0b01001110,$Xl,$Xm
767          pxor           $Xl,$Xm
768          pclmulqdq      \$0x00,$Hkey3,$Xl
769         pxor            $T2,$Xi                 #
770         pxor            $T1,$Xhi
771         psrlq           \$1,$Xi                 #
772          pclmulqdq      \$0x11,$Hkey3,$Xh
773          xorps          $Xl,$Xln
774         pxor            $Xhi,$Xi                #
775
776          pclmulqdq      \$0x00,$HK,$Xm
777          xorps          $Xh,$Xhn
778
779         movdqa          $Xi,$Xhi
780         pshufd          \$0b01001110,$Xi,$T1
781         pxor            $Xi,$T1
782
783         lea     0x40($inp),$inp
784         sub     \$0x40,$len
785         jnc     .Lmod4_loop
786
787 .Ltail4x:
788         pclmulqdq       \$0x00,$Hkey4,$Xi
789         xorps           $Xm,$Xmn
790         pclmulqdq       \$0x11,$Hkey4,$Xhi
791         xorps           $Xln,$Xi
792         pclmulqdq       \$0x10,$HK,$T1
793         xorps           $Xhn,$Xhi
794         pxor            $Xi,$Xhi                # aggregated Karatsuba post-processing
795         pxor            $Xmn,$T1
796
797         pxor            $Xhi,$T1                #
798         pxor            $Xi,$Xhi
799
800         movdqa          $T1,$T2                 #
801         psrldq          \$8,$T1
802         pslldq          \$8,$T2                 #
803         pxor            $T1,$Xhi
804         pxor            $T2,$Xi                 #
805 ___
806         &reduction_alg9($Xhi,$Xi);
807 $code.=<<___;
808         add     \$0x40,$len
809         jz      .Ldone
810         movdqu  0x20($Htbl),$HK
811         sub     \$0x10,$len
812         jz      .Lodd_tail
813 .Lskip4x:
814 ___
815 }
816 $code.=<<___;
817         #######
818         # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
819         #       [(H*Ii+1) + (H*Xi+1)] mod P =
820         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
821         #
822         movdqu          ($inp),$T1              # Ii
823         movdqu          16($inp),$Xln           # Ii+1
824         pshufb          $T3,$T1
825         pshufb          $T3,$Xln
826         pxor            $T1,$Xi                 # Ii+Xi
827
828         movdqa          $Xln,$Xhn
829         pshufd          \$0b01001110,$Xln,$Xmn
830         pxor            $Xln,$Xmn
831         pclmulqdq       \$0x00,$Hkey,$Xln
832         pclmulqdq       \$0x11,$Hkey,$Xhn
833         pclmulqdq       \$0x00,$HK,$Xmn
834
835         lea             32($inp),$inp           # i+=2
836         nop
837         sub             \$0x20,$len
838         jbe             .Leven_tail
839         nop
840         jmp             .Lmod_loop
841
842 .align  32
843 .Lmod_loop:
844         movdqa          $Xi,$Xhi
845         movdqa          $Xmn,$T1
846         pshufd          \$0b01001110,$Xi,$Xmn   #
847         pxor            $Xi,$Xmn                #
848
849         pclmulqdq       \$0x00,$Hkey2,$Xi
850         pclmulqdq       \$0x11,$Hkey2,$Xhi
851         pclmulqdq       \$0x10,$HK,$Xmn
852
853         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
854         pxor            $Xhn,$Xhi
855           movdqu        ($inp),$Xhn             # Ii
856         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
857           pshufb        $T3,$Xhn
858           movdqu        16($inp),$Xln           # Ii+1
859
860         pxor            $Xhi,$T1
861           pxor          $Xhn,$Xhi               # "Ii+Xi", consume early
862         pxor            $T1,$Xmn
863          pshufb         $T3,$Xln
864         movdqa          $Xmn,$T1                #
865         psrldq          \$8,$T1
866         pslldq          \$8,$Xmn                #
867         pxor            $T1,$Xhi
868         pxor            $Xmn,$Xi                #
869
870         movdqa          $Xln,$Xhn               #
871
872           movdqa        $Xi,$T2                 # 1st phase
873           movdqa        $Xi,$T1
874           psllq         \$5,$Xi
875           pxor          $Xi,$T1                 #
876         pclmulqdq       \$0x00,$Hkey,$Xln       #######
877           psllq         \$1,$Xi
878           pxor          $T1,$Xi                 #
879           psllq         \$57,$Xi                #
880           movdqa        $Xi,$T1                 #
881           pslldq        \$8,$Xi
882           psrldq        \$8,$T1                 #       
883           pxor          $T2,$Xi
884         pshufd          \$0b01001110,$Xhn,$Xmn
885           pxor          $T1,$Xhi                #
886         pxor            $Xhn,$Xmn               #
887
888         pclmulqdq       \$0x11,$Hkey,$Xhn       #######
889           movdqa        $Xi,$T2                 # 2nd phase
890           psrlq         \$1,$Xi
891           pxor          $T2,$Xhi                #
892           pxor          $Xi,$T2
893           psrlq         \$5,$Xi
894           pxor          $T2,$Xi                 #
895         lea             32($inp),$inp
896           psrlq         \$1,$Xi                 #
897         pclmulqdq       \$0x00,$HK,$Xmn         #######
898           pxor          $Xhi,$Xi                #
899           .byte         0x66,0x90
900
901         sub             \$0x20,$len
902         ja              .Lmod_loop
903
904 .Leven_tail:
905          movdqa         $Xi,$Xhi
906          movdqa         $Xmn,$T1
907          pshufd         \$0b01001110,$Xi,$Xmn   #
908          pxor           $Xi,$Xmn                #
909
910         pclmulqdq       \$0x00,$Hkey2,$Xi
911         pclmulqdq       \$0x11,$Hkey2,$Xhi
912         pclmulqdq       \$0x10,$HK,$Xmn
913
914         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
915         pxor            $Xhn,$Xhi
916         pxor            $Xi,$T1
917         pxor            $Xhi,$T1
918         pxor            $T1,$Xmn
919         movdqa          $Xmn,$T1                #
920         psrldq          \$8,$T1
921         pslldq          \$8,$Xmn                #
922         pxor            $T1,$Xhi
923         pxor            $Xmn,$Xi                #
924 ___
925         &reduction_alg9 ($Xhi,$Xi);
926 $code.=<<___;
927         test            $len,$len
928         jnz             .Ldone
929
930 .Lodd_tail:
931         movdqu          ($inp),$T1              # Ii
932         pshufb          $T3,$T1
933         pxor            $T1,$Xi                 # Ii+Xi
934 ___
935         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H*(Ii+Xi)
936         &reduction_alg9 ($Xhi,$Xi);
937 $code.=<<___;
938 .Ldone:
939         pshufb          $T3,$Xi
940         movdqu          $Xi,($Xip)
941 ___
942 $code.=<<___ if ($win64);
943         movaps  (%rsp),%xmm6
944         movaps  0x10(%rsp),%xmm7
945         movaps  0x20(%rsp),%xmm8
946         movaps  0x30(%rsp),%xmm9
947         movaps  0x40(%rsp),%xmm10
948         movaps  0x50(%rsp),%xmm11
949         movaps  0x60(%rsp),%xmm12
950         movaps  0x70(%rsp),%xmm13
951         movaps  0x80(%rsp),%xmm14
952         movaps  0x90(%rsp),%xmm15
953         lea     0xa8(%rsp),%rsp
954 .LSEH_end_gcm_ghash_clmul:
955 ___
956 $code.=<<___;
957         ret
958 .size   gcm_ghash_clmul,.-gcm_ghash_clmul
959 ___
960 }
961 \f
962 $code.=<<___;
963 .globl  gcm_init_avx
964 .type   gcm_init_avx,\@abi-omnipotent
965 .align  32
966 gcm_init_avx:
967 ___
968 if ($avx) {
969 my ($Htbl,$Xip)=@_4args;
970 my $HK="%xmm6";
971
972 $code.=<<___ if ($win64);
973 .LSEH_begin_gcm_init_avx:
974         # I can't trust assembler to use specific encoding:-(
975         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
976         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
977 ___
978 $code.=<<___;
979         vzeroupper
980
981         vmovdqu         ($Xip),$Hkey
982         vpshufd         \$0b01001110,$Hkey,$Hkey        # dword swap
983
984         # <<1 twist
985         vpshufd         \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
986         vpsrlq          \$63,$Hkey,$T1
987         vpsllq          \$1,$Hkey,$Hkey
988         vpxor           $T3,$T3,$T3             #
989         vpcmpgtd        $T2,$T3,$T3             # broadcast carry bit
990         vpslldq         \$8,$T1,$T1
991         vpor            $T1,$Hkey,$Hkey         # H<<=1
992
993         # magic reduction
994         vpand           .L0x1c2_polynomial(%rip),$T3,$T3
995         vpxor           $T3,$Hkey,$Hkey         # if(carry) H^=0x1c2_polynomial
996
997         vpunpckhqdq     $Hkey,$Hkey,$HK
998         vmovdqa         $Hkey,$Xi
999         vpxor           $Hkey,$HK,$HK
1000         mov             \$4,%r10                # up to H^8
1001         jmp             .Linit_start_avx
1002 ___
1003
1004 sub clmul64x64_avx {
1005 my ($Xhi,$Xi,$Hkey,$HK)=@_;
1006
1007 if (!defined($HK)) {    $HK = $T2;
1008 $code.=<<___;
1009         vpunpckhqdq     $Xi,$Xi,$T1
1010         vpunpckhqdq     $Hkey,$Hkey,$T2
1011         vpxor           $Xi,$T1,$T1             #
1012         vpxor           $Hkey,$T2,$T2
1013 ___
1014 } else {
1015 $code.=<<___;
1016         vpunpckhqdq     $Xi,$Xi,$T1
1017         vpxor           $Xi,$T1,$T1             #
1018 ___
1019 }
1020 $code.=<<___;
1021         vpclmulqdq      \$0x11,$Hkey,$Xi,$Xhi   #######
1022         vpclmulqdq      \$0x00,$Hkey,$Xi,$Xi    #######
1023         vpclmulqdq      \$0x00,$HK,$T1,$T1      #######
1024         vpxor           $Xi,$Xhi,$T2            #
1025         vpxor           $T2,$T1,$T1             #
1026
1027         vpslldq         \$8,$T1,$T2             #
1028         vpsrldq         \$8,$T1,$T1
1029         vpxor           $T2,$Xi,$Xi             #
1030         vpxor           $T1,$Xhi,$Xhi
1031 ___
1032 }
1033
1034 sub reduction_avx {
1035 my ($Xhi,$Xi) = @_;
1036
1037 $code.=<<___;
1038         vpsllq          \$57,$Xi,$T1            # 1st phase
1039         vpsllq          \$62,$Xi,$T2
1040         vpxor           $T1,$T2,$T2             #
1041         vpsllq          \$63,$Xi,$T1
1042         vpxor           $T1,$T2,$T2             #
1043         vpslldq         \$8,$T2,$T1             #
1044         vpsrldq         \$8,$T2,$T2
1045         vpxor           $T1,$Xi,$Xi             #
1046         vpxor           $T2,$Xhi,$Xhi
1047
1048         vpsrlq          \$1,$Xi,$T2             # 2nd phase
1049         vpxor           $Xi,$Xhi,$Xhi
1050         vpxor           $T2,$Xi,$Xi             #
1051         vpsrlq          \$5,$T2,$T2
1052         vpxor           $T2,$Xi,$Xi             #
1053         vpsrlq          \$1,$Xi,$Xi             #
1054         vpxor           $Xhi,$Xi,$Xi            #
1055 ___
1056 }
1057
1058 $code.=<<___;
1059 .align  32
1060 .Linit_loop_avx:
1061         vpalignr        \$8,$T1,$T2,$T3         # low part is H.lo^H.hi...
1062         vmovdqu         $T3,-0x10($Htbl)        # save Karatsuba "salt"
1063 ___
1064         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^3,5,7
1065         &reduction_avx  ($Xhi,$Xi);
1066 $code.=<<___;
1067 .Linit_start_avx:
1068         vmovdqa         $Xi,$T3
1069 ___
1070         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^2,4,6,8
1071         &reduction_avx  ($Xhi,$Xi);
1072 $code.=<<___;
1073         vpshufd         \$0b01001110,$T3,$T1
1074         vpshufd         \$0b01001110,$Xi,$T2
1075         vpxor           $T3,$T1,$T1             # Karatsuba pre-processing
1076         vmovdqu         $T3,0x00($Htbl)         # save H^1,3,5,7
1077         vpxor           $Xi,$T2,$T2             # Karatsuba pre-processing
1078         vmovdqu         $Xi,0x10($Htbl)         # save H^2,4,6,8
1079         lea             0x30($Htbl),$Htbl
1080         sub             \$1,%r10
1081         jnz             .Linit_loop_avx
1082
1083         vpalignr        \$8,$T2,$T1,$T3         # last "salt" is flipped
1084         vmovdqu         $T3,-0x10($Htbl)
1085
1086         vzeroupper
1087 ___
1088 $code.=<<___ if ($win64);
1089         movaps  (%rsp),%xmm6
1090         lea     0x18(%rsp),%rsp
1091 .LSEH_end_gcm_init_avx:
1092 ___
1093 $code.=<<___;
1094         ret
1095 .size   gcm_init_avx,.-gcm_init_avx
1096 ___
1097 } else {
1098 $code.=<<___;
1099         jmp     .L_init_clmul
1100 .size   gcm_init_avx,.-gcm_init_avx
1101 ___
1102 }
1103
1104 $code.=<<___;
1105 .globl  gcm_gmult_avx
1106 .type   gcm_gmult_avx,\@abi-omnipotent
1107 .align  32
1108 gcm_gmult_avx:
1109         jmp     .L_gmult_clmul
1110 .size   gcm_gmult_avx,.-gcm_gmult_avx
1111 ___
1112 \f
1113 $code.=<<___;
1114 .globl  gcm_ghash_avx
1115 .type   gcm_ghash_avx,\@abi-omnipotent
1116 .align  32
1117 gcm_ghash_avx:
1118 ___
1119 if ($avx) {
1120 my ($Xip,$Htbl,$inp,$len)=@_4args;
1121 my ($Xlo,$Xhi,$Xmi,
1122     $Zlo,$Zhi,$Zmi,
1123     $Hkey,$HK,$T1,$T2,
1124     $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));
1125
1126 $code.=<<___ if ($win64);
1127         lea     -0x88(%rsp),%rax
1128 .LSEH_begin_gcm_ghash_avx:
1129         # I can't trust assembler to use specific encoding:-(
1130         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1131         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
1132         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
1133         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
1134         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
1135         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
1136         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
1137         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
1138         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
1139         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
1140         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
1141 ___
1142 $code.=<<___;
1143         vzeroupper
1144
1145         vmovdqu         ($Xip),$Xi              # load $Xi
1146         lea             .L0x1c2_polynomial(%rip),%r10
1147         lea             0x40($Htbl),$Htbl       # size optimization
1148         vmovdqu         .Lbswap_mask(%rip),$bswap
1149         vpshufb         $bswap,$Xi,$Xi
1150         cmp             \$0x80,$len
1151         jb              .Lshort_avx
1152         sub             \$0x80,$len
1153
1154         vmovdqu         0x70($inp),$Ii          # I[7]
1155         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1156         vpshufb         $bswap,$Ii,$Ii
1157         vmovdqu         0x20-0x40($Htbl),$HK
1158
1159         vpunpckhqdq     $Ii,$Ii,$T2
1160          vmovdqu        0x60($inp),$Ij          # I[6]
1161         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1162         vpxor           $Ii,$T2,$T2
1163          vpshufb        $bswap,$Ij,$Ij
1164         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1165          vmovdqu        0x10-0x40($Htbl),$Hkey  # $Hkey^2
1166          vpunpckhqdq    $Ij,$Ij,$T1
1167          vmovdqu        0x50($inp),$Ii          # I[5]
1168         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1169          vpxor          $Ij,$T1,$T1
1170
1171          vpshufb        $bswap,$Ii,$Ii
1172         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1173          vpunpckhqdq    $Ii,$Ii,$T2
1174         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1175          vmovdqu        0x30-0x40($Htbl),$Hkey  # $Hkey^3
1176          vpxor          $Ii,$T2,$T2
1177          vmovdqu        0x40($inp),$Ij          # I[4]
1178         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1179          vmovdqu        0x50-0x40($Htbl),$HK
1180
1181          vpshufb        $bswap,$Ij,$Ij
1182         vpxor           $Xlo,$Zlo,$Zlo
1183         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1184         vpxor           $Xhi,$Zhi,$Zhi
1185          vpunpckhqdq    $Ij,$Ij,$T1
1186         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1187          vmovdqu        0x40-0x40($Htbl),$Hkey  # $Hkey^4
1188         vpxor           $Xmi,$Zmi,$Zmi
1189         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1190          vpxor          $Ij,$T1,$T1
1191
1192          vmovdqu        0x30($inp),$Ii          # I[3]
1193         vpxor           $Zlo,$Xlo,$Xlo
1194         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1195         vpxor           $Zhi,$Xhi,$Xhi
1196          vpshufb        $bswap,$Ii,$Ii
1197         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1198          vmovdqu        0x60-0x40($Htbl),$Hkey  # $Hkey^5
1199         vpxor           $Zmi,$Xmi,$Xmi
1200          vpunpckhqdq    $Ii,$Ii,$T2
1201         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1202          vmovdqu        0x80-0x40($Htbl),$HK
1203          vpxor          $Ii,$T2,$T2
1204
1205          vmovdqu        0x20($inp),$Ij          # I[2]
1206         vpxor           $Xlo,$Zlo,$Zlo
1207         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1208         vpxor           $Xhi,$Zhi,$Zhi
1209          vpshufb        $bswap,$Ij,$Ij
1210         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1211          vmovdqu        0x70-0x40($Htbl),$Hkey  # $Hkey^6
1212         vpxor           $Xmi,$Zmi,$Zmi
1213          vpunpckhqdq    $Ij,$Ij,$T1
1214         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1215          vpxor          $Ij,$T1,$T1
1216
1217          vmovdqu        0x10($inp),$Ii          # I[1]
1218         vpxor           $Zlo,$Xlo,$Xlo
1219         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1220         vpxor           $Zhi,$Xhi,$Xhi
1221          vpshufb        $bswap,$Ii,$Ii
1222         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1223          vmovdqu        0x90-0x40($Htbl),$Hkey  # $Hkey^7
1224         vpxor           $Zmi,$Xmi,$Xmi
1225          vpunpckhqdq    $Ii,$Ii,$T2
1226         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1227          vmovdqu        0xb0-0x40($Htbl),$HK
1228          vpxor          $Ii,$T2,$T2
1229
1230          vmovdqu        ($inp),$Ij              # I[0]
1231         vpxor           $Xlo,$Zlo,$Zlo
1232         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1233         vpxor           $Xhi,$Zhi,$Zhi
1234          vpshufb        $bswap,$Ij,$Ij
1235         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1236          vmovdqu        0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1237         vpxor           $Xmi,$Zmi,$Zmi
1238         vpclmulqdq      \$0x10,$HK,$T2,$Xmi
1239
1240         lea             0x80($inp),$inp
1241         cmp             \$0x80,$len
1242         jb              .Ltail_avx
1243
1244         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1245         sub             \$0x80,$len
1246         jmp             .Loop8x_avx
1247
1248 .align  32
1249 .Loop8x_avx:
1250         vpunpckhqdq     $Ij,$Ij,$T1
1251          vmovdqu        0x70($inp),$Ii          # I[7]
1252         vpxor           $Xlo,$Zlo,$Zlo
1253         vpxor           $Ij,$T1,$T1
1254         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xi
1255          vpshufb        $bswap,$Ii,$Ii
1256         vpxor           $Xhi,$Zhi,$Zhi
1257         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xo
1258          vmovdqu        0x00-0x40($Htbl),$Hkey  # $Hkey^1
1259          vpunpckhqdq    $Ii,$Ii,$T2
1260         vpxor           $Xmi,$Zmi,$Zmi
1261         vpclmulqdq      \$0x00,$HK,$T1,$Tred
1262          vmovdqu        0x20-0x40($Htbl),$HK
1263          vpxor          $Ii,$T2,$T2
1264
1265           vmovdqu       0x60($inp),$Ij          # I[6]
1266          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1267         vpxor           $Zlo,$Xi,$Xi            # collect result
1268           vpshufb       $bswap,$Ij,$Ij
1269          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1270         vxorps          $Zhi,$Xo,$Xo
1271           vmovdqu       0x10-0x40($Htbl),$Hkey  # $Hkey^2
1272          vpunpckhqdq    $Ij,$Ij,$T1
1273          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1274         vpxor           $Zmi,$Tred,$Tred
1275          vxorps         $Ij,$T1,$T1
1276
1277           vmovdqu       0x50($inp),$Ii          # I[5]
1278         vpxor           $Xi,$Tred,$Tred         # aggregated Karatsuba post-processing
1279          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1280         vpxor           $Xo,$Tred,$Tred
1281         vpslldq         \$8,$Tred,$T2
1282          vpxor          $Xlo,$Zlo,$Zlo
1283          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1284         vpsrldq         \$8,$Tred,$Tred
1285         vpxor           $T2, $Xi, $Xi
1286           vmovdqu       0x30-0x40($Htbl),$Hkey  # $Hkey^3
1287           vpshufb       $bswap,$Ii,$Ii
1288         vxorps          $Tred,$Xo, $Xo
1289          vpxor          $Xhi,$Zhi,$Zhi
1290          vpunpckhqdq    $Ii,$Ii,$T2
1291          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1292           vmovdqu       0x50-0x40($Htbl),$HK
1293          vpxor          $Ii,$T2,$T2
1294          vpxor          $Xmi,$Zmi,$Zmi
1295
1296           vmovdqu       0x40($inp),$Ij          # I[4]
1297         vpalignr        \$8,$Xi,$Xi,$Tred       # 1st phase
1298          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1299           vpshufb       $bswap,$Ij,$Ij
1300          vpxor          $Zlo,$Xlo,$Xlo
1301          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1302           vmovdqu       0x40-0x40($Htbl),$Hkey  # $Hkey^4
1303          vpunpckhqdq    $Ij,$Ij,$T1
1304          vpxor          $Zhi,$Xhi,$Xhi
1305          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1306          vxorps         $Ij,$T1,$T1
1307          vpxor          $Zmi,$Xmi,$Xmi
1308
1309           vmovdqu       0x30($inp),$Ii          # I[3]
1310         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1311          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1312           vpshufb       $bswap,$Ii,$Ii
1313          vpxor          $Xlo,$Zlo,$Zlo
1314          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1315           vmovdqu       0x60-0x40($Htbl),$Hkey  # $Hkey^5
1316          vpunpckhqdq    $Ii,$Ii,$T2
1317          vpxor          $Xhi,$Zhi,$Zhi
1318          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1319           vmovdqu       0x80-0x40($Htbl),$HK
1320          vpxor          $Ii,$T2,$T2
1321          vpxor          $Xmi,$Zmi,$Zmi
1322
1323           vmovdqu       0x20($inp),$Ij          # I[2]
1324          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1325           vpshufb       $bswap,$Ij,$Ij
1326          vpxor          $Zlo,$Xlo,$Xlo
1327          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1328           vmovdqu       0x70-0x40($Htbl),$Hkey  # $Hkey^6
1329          vpunpckhqdq    $Ij,$Ij,$T1
1330          vpxor          $Zhi,$Xhi,$Xhi
1331          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1332          vpxor          $Ij,$T1,$T1
1333          vpxor          $Zmi,$Xmi,$Xmi
1334         vxorps          $Tred,$Xi,$Xi
1335
1336           vmovdqu       0x10($inp),$Ii          # I[1]
1337         vpalignr        \$8,$Xi,$Xi,$Tred       # 2nd phase
1338          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1339           vpshufb       $bswap,$Ii,$Ii
1340          vpxor          $Xlo,$Zlo,$Zlo
1341          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1342           vmovdqu       0x90-0x40($Htbl),$Hkey  # $Hkey^7
1343         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1344         vxorps          $Xo,$Tred,$Tred
1345          vpunpckhqdq    $Ii,$Ii,$T2
1346          vpxor          $Xhi,$Zhi,$Zhi
1347          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1348           vmovdqu       0xb0-0x40($Htbl),$HK
1349          vpxor          $Ii,$T2,$T2
1350          vpxor          $Xmi,$Zmi,$Zmi
1351
1352           vmovdqu       ($inp),$Ij              # I[0]
1353          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1354           vpshufb       $bswap,$Ij,$Ij
1355          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1356           vmovdqu       0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1357         vpxor           $Tred,$Ij,$Ij
1358          vpclmulqdq     \$0x10,$HK,  $T2,$Xmi
1359         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1360
1361         lea             0x80($inp),$inp
1362         sub             \$0x80,$len
1363         jnc             .Loop8x_avx
1364
1365         add             \$0x80,$len
1366         jmp             .Ltail_no_xor_avx
1367
1368 .align  32
1369 .Lshort_avx:
1370         vmovdqu         -0x10($inp,$len),$Ii    # very last word
1371         lea             ($inp,$len),$inp
1372         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1373         vmovdqu         0x20-0x40($Htbl),$HK
1374         vpshufb         $bswap,$Ii,$Ij
1375
1376         vmovdqa         $Xlo,$Zlo               # subtle way to zero $Zlo,
1377         vmovdqa         $Xhi,$Zhi               # $Zhi and
1378         vmovdqa         $Xmi,$Zmi               # $Zmi
1379         sub             \$0x10,$len
1380         jz              .Ltail_avx
1381
1382         vpunpckhqdq     $Ij,$Ij,$T1
1383         vpxor           $Xlo,$Zlo,$Zlo
1384         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1385         vpxor           $Ij,$T1,$T1
1386          vmovdqu        -0x20($inp),$Ii
1387         vpxor           $Xhi,$Zhi,$Zhi
1388         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x10-0x40($Htbl),$Hkey  # $Hkey^2
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vpsrldq         \$8,$HK,$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x30($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x30-0x40($Htbl),$Hkey  # $Hkey^3
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vmovdqu         0x50-0x40($Htbl),$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x40($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x40-0x40($Htbl),$Hkey  # $Hkey^4
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vpsrldq         \$8,$HK,$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x50($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x60-0x40($Htbl),$Hkey  # $Hkey^5
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vmovdqu         0x80-0x40($Htbl),$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x60($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x70-0x40($Htbl),$Hkey  # $Hkey^6
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vpsrldq         \$8,$HK,$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x70($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x90-0x40($Htbl),$Hkey  # $Hkey^7
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vmovq           0xb8-0x40($Htbl),$HK
        sub             \$0x10,$len
        jmp             .Ltail_avx

.align  32
.Ltail_avx:
        vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
.Ltail_no_xor_avx:
        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi

        vmovdqu         (%r10),$Tred

        vpxor           $Xlo,$Zlo,$Xi
        vpxor           $Xhi,$Zhi,$Xo
        vpxor           $Xmi,$Zmi,$Zmi

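        # $Xi, $Xo and $Zmi now hold the accumulated low, high and middle
        # Karatsuba terms; the next block folds the middle term into the
        # 256-bit product before reduction.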
        vpxor           $Xi, $Zmi,$Zmi          # aggregated Karatsuba post-processing
        vpxor           $Xo, $Zmi,$Zmi
        vpslldq         \$8, $Zmi,$T2
        vpsrldq         \$8, $Zmi,$Zmi
        vpxor           $T2, $Xi, $Xi
        vpxor           $Zmi,$Xo, $Xo

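        # reduce the 256-bit product modulo the GHASH polynomial in two
        # pclmulqdq phases; $Tred holds the .L0x1c2_polynomial constant
        # loaded above (%r10 is expected to point at it)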
        vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 1st phase
        vpalignr        \$8,$Xi,$Xi,$Xi
        vpxor           $T2,$Xi,$Xi

        vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 2nd phase
        vpalignr        \$8,$Xi,$Xi,$Xi
        vpxor           $Xo,$Xi,$Xi
        vpxor           $T2,$Xi,$Xi

        cmp             \$0,$len
        jne             .Lshort_avx

        vpshufb         $bswap,$Xi,$Xi
        vmovdqu         $Xi,($Xip)
        vzeroupper
___
$code.=<<___ if ($win64);
        movaps  (%rsp),%xmm6
        movaps  0x10(%rsp),%xmm7
        movaps  0x20(%rsp),%xmm8
        movaps  0x30(%rsp),%xmm9
        movaps  0x40(%rsp),%xmm10
        movaps  0x50(%rsp),%xmm11
        movaps  0x60(%rsp),%xmm12
        movaps  0x70(%rsp),%xmm13
        movaps  0x80(%rsp),%xmm14
        movaps  0x90(%rsp),%xmm15
        lea     0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_avx:
___
$code.=<<___;
        ret
.size   gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
$code.=<<___;
        jmp     .L_ghash_clmul
.size   gcm_ghash_avx,.-gcm_ghash_avx
___
}
\f
$code.=<<___;
.align  64
.Lbswap_mask:
        .byte   15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
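# reduction constant: the GHASH polynomial x^128+x^7+x^2+x+1 in the
# bit-reflected representation used by the carry-less multiplication paths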
.L0x1c2_polynomial:
        .byte   1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
        .long   7,0,7,0
.L7_mask_poly:
        .long   7,0,`0xE1<<1`,0
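# remainder look-up tables for the table-driven "4-bit" code paths
# (used by the gcm_gmult_4bit and gcm_ghash_4bit routines earlier in this file)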
.align  64
.type   .Lrem_4bit,\@object
.Lrem_4bit:
        .long   0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
        .long   0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
        .long   0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
        .long   0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
.type   .Lrem_8bit,\@object
.Lrem_8bit:
        .value  0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
        .value  0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
        .value  0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
        .value  0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
        .value  0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
        .value  0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
        .value  0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
        .value  0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
        .value  0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
        .value  0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
        .value  0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
        .value  0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
        .value  0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
        .value  0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
        .value  0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
        .value  0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
        .value  0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
        .value  0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
        .value  0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
        .value  0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
        .value  0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
        .value  0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
        .value  0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
        .value  0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
        .value  0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
        .value  0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
        .value  0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
        .value  0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
        .value  0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
        .value  0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
        .value  0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
        .value  0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE

.asciz  "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align  64
___
\f
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type   se_handler,\@abi-omnipotent
.align  16
se_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip

        mov     8($disp),%rsi           # disp->ImageBase
        mov     56($disp),%r11          # disp->HandlerData

        mov     0(%r11),%r10d           # HandlerData[0]
        lea     (%rsi,%r10),%r10        # prologue label
        cmp     %r10,%rbx               # context->Rip<prologue label
        jb      .Lin_prologue

        mov     152($context),%rax      # pull context->Rsp

        mov     4(%r11),%r10d           # HandlerData[1]
        lea     (%rsi,%r10),%r10        # epilogue label
        cmp     %r10,%rbx               # context->Rip>=epilogue label
        jae     .Lin_prologue

        lea     24(%rax),%rax           # adjust "rsp"

        mov     -8(%rax),%rbx
        mov     -16(%rax),%rbp
        mov     -24(%rax),%r12
        mov     %rbx,144($context)      # restore context->Rbx
        mov     %rbp,160($context)      # restore context->Rbp
        mov     %r12,216($context)      # restore context->R12

.Lin_prologue:
        mov     8(%rax),%rdi
        mov     16(%rax),%rsi
        mov     %rax,152($context)      # restore context->Rsp
        mov     %rsi,168($context)      # restore context->Rsi
        mov     %rdi,176($context)      # restore context->Rdi

        mov     40($disp),%rdi          # disp->ContextRecord
        mov     $context,%rsi           # context
        mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
        .long   0xa548f3fc              # cld; rep movsq

        mov     $disp,%rsi
        xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
        mov     8(%rsi),%rdx            # arg2, disp->ImageBase
        mov     0(%rsi),%r8             # arg3, disp->ControlPc
        mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
        mov     40(%rsi),%r10           # disp->ContextRecord
        lea     56(%rsi),%r11           # &disp->HandlerData
        lea     24(%rsi),%r12           # &disp->EstablisherFrame
        mov     %r10,32(%rsp)           # arg5
        mov     %r11,40(%rsp)           # arg6
        mov     %r12,48(%rsp)           # arg7
        mov     %rcx,56(%rsp)           # arg8, (NULL)
        call    *__imp_RtlVirtualUnwind(%rip)

        mov     \$1,%eax                # ExceptionContinueSearch
        add     \$64,%rsp
        popfq
        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        pop     %rbp
        pop     %rbx
        pop     %rdi
        pop     %rsi
        ret
.size   se_handler,.-se_handler

.section        .pdata
.align  4
        .rva    .LSEH_begin_gcm_gmult_4bit
        .rva    .LSEH_end_gcm_gmult_4bit
        .rva    .LSEH_info_gcm_gmult_4bit

        .rva    .LSEH_begin_gcm_ghash_4bit
        .rva    .LSEH_end_gcm_ghash_4bit
        .rva    .LSEH_info_gcm_ghash_4bit

        .rva    .LSEH_begin_gcm_init_clmul
        .rva    .LSEH_end_gcm_init_clmul
        .rva    .LSEH_info_gcm_init_clmul

        .rva    .LSEH_begin_gcm_ghash_clmul
        .rva    .LSEH_end_gcm_ghash_clmul
        .rva    .LSEH_info_gcm_ghash_clmul
___
$code.=<<___    if ($avx);
        .rva    .LSEH_begin_gcm_init_avx
        .rva    .LSEH_end_gcm_init_avx
        .rva    .LSEH_info_gcm_init_clmul

        .rva    .LSEH_begin_gcm_ghash_avx
        .rva    .LSEH_end_gcm_ghash_avx
        .rva    .LSEH_info_gcm_ghash_clmul
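        # the entries above map gcm_init_avx and gcm_ghash_avx onto the CLMUL
        # unwind descriptors below; the xmm save area and stack adjustment
        # appear to match (cf. the gcm_ghash_avx epilogue above)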
___
$code.=<<___;
.section        .xdata
.align  8
.LSEH_info_gcm_gmult_4bit:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lgmult_prologue,.Lgmult_epilogue       # HandlerData
.LSEH_info_gcm_ghash_4bit:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lghash_prologue,.Lghash_epilogue       # HandlerData
.LSEH_info_gcm_init_clmul:
        .byte   0x01,0x08,0x03,0x00
        .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
        .byte   0x04,0x22,0x00,0x00     #sub    rsp,0x18
.LSEH_info_gcm_ghash_clmul:
        .byte   0x01,0x33,0x16,0x00
        .byte   0x33,0xf8,0x09,0x00     #movaps 0x90(rsp),xmm15
        .byte   0x2e,0xe8,0x08,0x00     #movaps 0x80(rsp),xmm14
        .byte   0x29,0xd8,0x07,0x00     #movaps 0x70(rsp),xmm13
        .byte   0x24,0xc8,0x06,0x00     #movaps 0x60(rsp),xmm12
        .byte   0x1f,0xb8,0x05,0x00     #movaps 0x50(rsp),xmm11
        .byte   0x1a,0xa8,0x04,0x00     #movaps 0x40(rsp),xmm10
        .byte   0x15,0x98,0x03,0x00     #movaps 0x30(rsp),xmm9
        .byte   0x10,0x88,0x02,0x00     #movaps 0x20(rsp),xmm8
        .byte   0x0c,0x78,0x01,0x00     #movaps 0x10(rsp),xmm7
        .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
        .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
___
}
\f
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;