x86[_64] assembly pack: add Silvermont performance data.
crypto/modes/asm/ghash-x86_64.pl (openssl.git)
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # March, June 2010
11 #
12 # The module implements the "4-bit" GCM GHASH function and the
13 # underlying single multiplication operation in GF(2^128). "4-bit"
14 # means it uses a 256-byte per-key table [+128 bytes shared table].
15 # The GHASH function features a so-called "528B" variant utilizing an
16 # additional 256+16 bytes of per-key storage [+512 bytes shared table].
17 # Performance results are for this streamed GHASH subroutine and are
18 # expressed in cycles per processed byte, less is better:
19 #
20 #               gcc 3.4.x(*)    assembler
21 #
22 # P4            28.6            14.0            +100%
23 # Opteron       19.3            7.7             +150%
24 # Core2         17.8            8.1(**)         +120%
25 # Atom          31.6            16.8            +88%
26 # VIA Nano      21.8            10.1            +115%
27 #
28 # (*)   comparison is not completely fair, because C results are
29 #       for vanilla "256B" implementation, while assembler results
30 #       are for "528B";-)
31 # (**)  it's a mystery [to me] why the Core2 result is not the same
32 #       as Opteron's;
33
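# For reference, the single GF(2^128) multiplication mentioned above uses
# GHASH's bit-reflected convention: bit 0 is the most significant bit of
# the first byte and the reduction polynomial is x^128+x^7+x^2+x+1.  The
# bit-by-bit sketch below, built on core Math::BigInt, is illustrative
# only and is never called by this module; gf128_mul_ref and its
# hex-string interface are the sketch's own invention.  The "4-bit" tables
# merely precompute i*H for all 16 nibble values so that the real code can
# consume four bits per step.

use Math::BigInt;

sub gf128_mul_ref {
    my ($xhex,$yhex) = @_;		# 32-digit hex strings, MSB first
    my $x = Math::BigInt->from_hex($xhex);
    my $v = Math::BigInt->from_hex($yhex);
    my $R = Math::BigInt->from_hex("e1".("00"x15));	# reflected x^7+x^2+x+1
    my $z = Math::BigInt->bzero();
    for my $i (0..127) {		# scan X from its most significant bit
	$z->bxor($v) if $x->copy()->brsft(127-$i)->band(1)->is_one();
	my $carry = $v->copy()->band(1)->is_one();
	$v->brsft(1);			# V >>= 1
	$v->bxor($R) if $carry;		# fold the bit that fell off the end
    }
    return $z;				# Math::BigInt; use ->as_hex() to print
}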
34 # May 2010
35 #
36 # Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
37 # See ghash-x86.pl for background information and details about coding
38 # techniques.
39 #
40 # Special thanks to David Woodhouse <dwmw2@infradead.org> for
41 # providing access to a Westmere-based system on behalf of Intel
42 # Open Source Technology Centre.
43
44 # December 2012
45 #
46 # Overhaul: aggregate Karatsuba post-processing, improve ILP in
47 # reduction_alg9, increase reduction aggregate factor to 4x. As for
48 # the latter: ghash-x86.pl argues that it makes less sense to
49 # increase the aggregate factor. Then why increase it here? The
50 # critical path consists of 3 independent pclmulqdq instructions,
51 # Karatsuba post-processing and reduction. "On top" of this we lay
52 # down aggregated multiplication operations, triplets of independent
53 # pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
54 # little sense to aggregate more multiplications than it takes to
55 # cover the remaining non-multiplication operations. 2x is a
56 # near-optimal coefficient for contemporary Intel CPUs (hence the
57 # modest improvement), but not for Bulldozer, whose logical SIMD
58 # operations are twice as slow as Intel's, so that its critical path
59 # is longer. A CPU with a higher pclmulqdq issue rate would also
60 # benefit from a higher aggregate factor...
61 #
62 # Westmere      1.78(+13%)
63 # Sandy Bridge  1.80(+8%)
64 # Ivy Bridge    1.80(+7%)
65 # Haswell       0.55(+93%) (if system doesn't support AVX)
66 # Bulldozer     1.49(+27%)
67 # Silvermont    2.88(+13%)
68
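# To make the aggregation concrete: with an aggregate factor of 4 the inner
# loop consumes four blocks I0..I3 per iteration and computes
#
#	Xi+4 = ((((Xi ^ I0)*H ^ I1)*H ^ I2)*H ^ I3)*H		# Horner form
#	     = (Xi ^ I0)*H^4 ^ I1*H^3 ^ I2*H^2 ^ I3*H		# expanded
#
# where * is multiplication in GF(2^128) and ^ is XOR.  The expanded form
# is what gets evaluated: four independent multiplications by pre-computed
# powers of H whose unreduced results are XORed together and fed to one
# shared reduction, instead of four multiply-reduce round trips.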
69 # March 2013
70 #
71 # ... the 8x aggregate factor AVX code path uses the reduction algorithm
72 # suggested by Shay Gueron[1]. Even though contemporary AVX-capable
73 # CPUs such as Sandy and Ivy Bridge can execute it, the code performs
74 # sub-optimally in comparison to the above-mentioned version. But thanks
75 # to Ilya Albrekht and Max Locktyukhin of Intel Corp. we knew that
76 # it performs at 0.41 cycles per byte on a Haswell processor.
77 #
78 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
79
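# Concretely, the AVX path below reduces with two vpclmulqdq's against
# .L0x1c2_polynomial plus vpalignr rotations of $Xi (the "1st phase" /
# "2nd phase" steps in gcm_ghash_avx), instead of reduction_alg9's
# shift-and-XOR sequence.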
80 $flavour = shift;
81 $output  = shift;
82 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
83
84 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
85
86 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
87 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
88 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
89 die "can't locate x86_64-xlate.pl";
90
91 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
92                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
93         $avx = ($1>=2.19) + ($1>=2.22);
94 }
95
96 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
97             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
98         $avx = ($1>=2.09) + ($1>=2.10);
99 }
100
101 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
102             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
103         $avx = ($1>=10) + ($1>=11);
104 }
105
106 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
107         $avx = ($2>=3.0) + ($2>3.0);
108 }
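# $avx ends up as 0, 1 or 2: 0 means the assembler can't be trusted with
# AVX instructions, 1 that it accepts AVX, 2 that it also accepts AVX2.
# This module only tests $avx for being non-zero, to gate the gcm_*_avx
# code paths further down.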
109
110 open OUT,"| \"$^X\" $xlate $flavour $output";
111 *STDOUT=*OUT;
112
113 $do4xaggr=1;
114
115 # common register layout
116 $nlo="%rax";
117 $nhi="%rbx";
118 $Zlo="%r8";
119 $Zhi="%r9";
120 $tmp="%r10";
121 $rem_4bit = "%r11";
122
123 $Xi="%rdi";
124 $Htbl="%rsi";
125
126 # per-function register layout
127 $cnt="%rcx";
128 $rem="%rdx";
129
130 sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/     or
131                         $r =~ s/%[er]([sd]i)/%\1l/      or
132                         $r =~ s/%[er](bp)/%\1l/         or
133                         $r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }
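# LB() maps a register to its low-byte name, e.g. "%rax"->"%al",
# "%rsi"->"%sil", "%r12"->"%r12b"; the captured group keeps the
# distinguishing letters and only the suffix changes.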
134
135 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
136 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
137   my $arg = pop;
138     $arg = "\$$arg" if ($arg*1 eq $arg);
139     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
140 }
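# The AUTOLOAD thunk turns any otherwise-undefined &opcode(...) call into a
# line of assembly appended to $code, destination written first in the perl
# call but emitted last: &mov("($Xi)",$Zhi) yields "\tmov\t%r9,(%rdi)\n" and
# &sub($Htbl,-128) yields "\tsub\t\$-128,%rsi\n" (a numeric last argument
# becomes an immediate).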
141 \f
142 { my $N;
143   sub loop() {
144   my $inp = shift;
145
146         $N++;
147 $code.=<<___;
148         xor     $nlo,$nlo
149         xor     $nhi,$nhi
150         mov     `&LB("$Zlo")`,`&LB("$nlo")`
151         mov     `&LB("$Zlo")`,`&LB("$nhi")`
152         shl     \$4,`&LB("$nlo")`
153         mov     \$14,$cnt
154         mov     8($Htbl,$nlo),$Zlo
155         mov     ($Htbl,$nlo),$Zhi
156         and     \$0xf0,`&LB("$nhi")`
157         mov     $Zlo,$rem
158         jmp     .Loop$N
159
160 .align  16
161 .Loop$N:
162         shr     \$4,$Zlo
163         and     \$0xf,$rem
164         mov     $Zhi,$tmp
165         mov     ($inp,$cnt),`&LB("$nlo")`
166         shr     \$4,$Zhi
167         xor     8($Htbl,$nhi),$Zlo
168         shl     \$60,$tmp
169         xor     ($Htbl,$nhi),$Zhi
170         mov     `&LB("$nlo")`,`&LB("$nhi")`
171         xor     ($rem_4bit,$rem,8),$Zhi
172         mov     $Zlo,$rem
173         shl     \$4,`&LB("$nlo")`
174         xor     $tmp,$Zlo
175         dec     $cnt
176         js      .Lbreak$N
177
178         shr     \$4,$Zlo
179         and     \$0xf,$rem
180         mov     $Zhi,$tmp
181         shr     \$4,$Zhi
182         xor     8($Htbl,$nlo),$Zlo
183         shl     \$60,$tmp
184         xor     ($Htbl,$nlo),$Zhi
185         and     \$0xf0,`&LB("$nhi")`
186         xor     ($rem_4bit,$rem,8),$Zhi
187         mov     $Zlo,$rem
188         xor     $tmp,$Zlo
189         jmp     .Loop$N
190
191 .align  16
192 .Lbreak$N:
193         shr     \$4,$Zlo
194         and     \$0xf,$rem
195         mov     $Zhi,$tmp
196         shr     \$4,$Zhi
197         xor     8($Htbl,$nlo),$Zlo
198         shl     \$60,$tmp
199         xor     ($Htbl,$nlo),$Zhi
200         and     \$0xf0,`&LB("$nhi")`
201         xor     ($rem_4bit,$rem,8),$Zhi
202         mov     $Zlo,$rem
203         xor     $tmp,$Zlo
204
205         shr     \$4,$Zlo
206         and     \$0xf,$rem
207         mov     $Zhi,$tmp
208         shr     \$4,$Zhi
209         xor     8($Htbl,$nhi),$Zlo
210         shl     \$60,$tmp
211         xor     ($Htbl,$nhi),$Zhi
212         xor     $tmp,$Zlo
213         xor     ($rem_4bit,$rem,8),$Zhi
214
215         bswap   $Zlo
216         bswap   $Zhi
217 ___
218 }}
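# loop() above emits the inner loop of the "4-bit" method: it walks the
# input bytes from offset 14 down to 0 (the top byte is consumed before
# entry), splits each into low/high nibbles indexing the 16-entry $Htbl,
# shifts the 128-bit accumulator $Zhi:$Zlo right by 4 per step, repairs the
# four bits shifted off the bottom through the .Lrem_4bit reduction table,
# and byte-swaps the result at the end.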
219
220 $code=<<___;
221 .text
222 .extern OPENSSL_ia32cap_P
223
224 .globl  gcm_gmult_4bit
225 .type   gcm_gmult_4bit,\@function,2
226 .align  16
227 gcm_gmult_4bit:
228         push    %rbx
229         push    %rbp            # %rbp and %r12 are pushed exclusively in
230         push    %r12            # order to reuse Win64 exception handler...
231 .Lgmult_prologue:
232
233         movzb   15($Xi),$Zlo
234         lea     .Lrem_4bit(%rip),$rem_4bit
235 ___
236         &loop   ($Xi);
237 $code.=<<___;
238         mov     $Zlo,8($Xi)
239         mov     $Zhi,($Xi)
240
241         mov     16(%rsp),%rbx
242         lea     24(%rsp),%rsp
243 .Lgmult_epilogue:
244         ret
245 .size   gcm_gmult_4bit,.-gcm_gmult_4bit
246 ___
247 \f
248 # per-function register layout
249 $inp="%rdx";
250 $len="%rcx";
251 $rem_8bit=$rem_4bit;
252
253 $code.=<<___;
254 .globl  gcm_ghash_4bit
255 .type   gcm_ghash_4bit,\@function,4
256 .align  16
257 gcm_ghash_4bit:
258         push    %rbx
259         push    %rbp
260         push    %r12
261         push    %r13
262         push    %r14
263         push    %r15
264         sub     \$280,%rsp
265 .Lghash_prologue:
266         mov     $inp,%r14               # reassign couple of args
267         mov     $len,%r15
268 ___
269 { my $inp="%r14";
270   my $dat="%edx";
271   my $len="%r15";
272   my @nhi=("%ebx","%ecx");
273   my @rem=("%r12","%r13");
274   my $Hshr4="%rbp";
275
276         &sub    ($Htbl,-128);           # size optimization
277         &lea    ($Hshr4,"16+128(%rsp)");
278         { my @lo =($nlo,$nhi);
279           my @hi =($Zlo,$Zhi);
280
281           &xor  ($dat,$dat);
282           for ($i=0,$j=-2;$i<18;$i++,$j++) {
283             &mov        ("$j(%rsp)",&LB($dat))          if ($i>1);
284             &or         ($lo[0],$tmp)                   if ($i>1);
285             &mov        (&LB($dat),&LB($lo[1]))         if ($i>0 && $i<17);
286             &shr        ($lo[1],4)                      if ($i>0 && $i<17);
287             &mov        ($tmp,$hi[1])                   if ($i>0 && $i<17);
288             &shr        ($hi[1],4)                      if ($i>0 && $i<17);
289             &mov        ("8*$j($Hshr4)",$hi[0])         if ($i>1);
290             &mov        ($hi[0],"16*$i+0-128($Htbl)")   if ($i<16);
291             &shl        (&LB($dat),4)                   if ($i>0 && $i<17);
292             &mov        ("8*$j-128($Hshr4)",$lo[0])     if ($i>1);
293             &mov        ($lo[0],"16*$i+8-128($Htbl)")   if ($i<16);
294             &shl        ($tmp,60)                       if ($i>0 && $i<17);
295
296             push        (@lo,shift(@lo));
297             push        (@hi,shift(@hi));
298           }
299         }
300         &add    ($Htbl,-128);
301         &mov    ($Zlo,"8($Xi)");
302         &mov    ($Zhi,"0($Xi)");
303         &add    ($len,$inp);            # pointer to the end of data
304         &lea    ($rem_8bit,".Lrem_8bit(%rip)");
305         &jmp    (".Louter_loop");
306
307 $code.=".align  16\n.Louter_loop:\n";
308         &xor    ($Zhi,"($inp)");
309         &mov    ("%rdx","8($inp)");
310         &lea    ($inp,"16($inp)");
311         &xor    ("%rdx",$Zlo);
312         &mov    ("($Xi)",$Zhi);
313         &mov    ("8($Xi)","%rdx");
314         &shr    ("%rdx",32);
315
316         &xor    ($nlo,$nlo);
317         &rol    ($dat,8);
318         &mov    (&LB($nlo),&LB($dat));
319         &movz   ($nhi[0],&LB($dat));
320         &shl    (&LB($nlo),4);
321         &shr    ($nhi[0],4);
322
323         for ($j=11,$i=0;$i<15;$i++) {
324             &rol        ($dat,8);
325             &xor        ($Zlo,"8($Htbl,$nlo)")                  if ($i>0);
326             &xor        ($Zhi,"($Htbl,$nlo)")                   if ($i>0);
327             &mov        ($Zlo,"8($Htbl,$nlo)")                  if ($i==0);
328             &mov        ($Zhi,"($Htbl,$nlo)")                   if ($i==0);
329
330             &mov        (&LB($nlo),&LB($dat));
331             &xor        ($Zlo,$tmp)                             if ($i>0);
332             &movzw      ($rem[1],"($rem_8bit,$rem[1],2)")       if ($i>0);
333
334             &movz       ($nhi[1],&LB($dat));
335             &shl        (&LB($nlo),4);
336             &movzb      ($rem[0],"(%rsp,$nhi[0])");
337
338             &shr        ($nhi[1],4)                             if ($i<14);
339             &and        ($nhi[1],0xf0)                          if ($i==14);
340             &shl        ($rem[1],48)                            if ($i>0);
341             &xor        ($rem[0],$Zlo);
342
343             &mov        ($tmp,$Zhi);
344             &xor        ($Zhi,$rem[1])                          if ($i>0);
345             &shr        ($Zlo,8);
346
347             &movz       ($rem[0],&LB($rem[0]));
348             &mov        ($dat,"$j($Xi)")                        if (--$j%4==0);
349             &shr        ($Zhi,8);
350
351             &xor        ($Zlo,"-128($Hshr4,$nhi[0],8)");
352             &shl        ($tmp,56);
353             &xor        ($Zhi,"($Hshr4,$nhi[0],8)");
354
355             unshift     (@nhi,pop(@nhi));               # "rotate" registers
356             unshift     (@rem,pop(@rem));
357         }
358         &movzw  ($rem[1],"($rem_8bit,$rem[1],2)");
359         &xor    ($Zlo,"8($Htbl,$nlo)");
360         &xor    ($Zhi,"($Htbl,$nlo)");
361
362         &shl    ($rem[1],48);
363         &xor    ($Zlo,$tmp);
364
365         &xor    ($Zhi,$rem[1]);
366         &movz   ($rem[0],&LB($Zlo));
367         &shr    ($Zlo,4);
368
369         &mov    ($tmp,$Zhi);
370         &shl    (&LB($rem[0]),4);
371         &shr    ($Zhi,4);
372
373         &xor    ($Zlo,"8($Htbl,$nhi[0])");
374         &movzw  ($rem[0],"($rem_8bit,$rem[0],2)");
375         &shl    ($tmp,60);
376
377         &xor    ($Zhi,"($Htbl,$nhi[0])");
378         &xor    ($Zlo,$tmp);
379         &shl    ($rem[0],48);
380
381         &bswap  ($Zlo);
382         &xor    ($Zhi,$rem[0]);
383
384         &bswap  ($Zhi);
385         &cmp    ($inp,$len);
386         &jb     (".Louter_loop");
387 }
388 $code.=<<___;
389         mov     $Zlo,8($Xi)
390         mov     $Zhi,($Xi)
391
392         lea     280(%rsp),%rsi
393         mov     0(%rsi),%r15
394         mov     8(%rsi),%r14
395         mov     16(%rsi),%r13
396         mov     24(%rsi),%r12
397         mov     32(%rsi),%rbp
398         mov     40(%rsi),%rbx
399         lea     48(%rsi),%rsp
400 .Lghash_epilogue:
401         ret
402 .size   gcm_ghash_4bit,.-gcm_ghash_4bit
403 ___
404 \f
405 ######################################################################
406 # PCLMULQDQ version.
407
408 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
409                 ("%rdi","%rsi","%rdx","%rcx");  # Unix order
410
411 ($Xi,$Xhi)=("%xmm0","%xmm1");   $Hkey="%xmm2";
412 ($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
413
414 sub clmul64x64_T2 {     # minimal register pressure
415 my ($Xhi,$Xi,$Hkey,$HK)=@_;
416
417 if (!defined($HK)) {    $HK = $T2;
418 $code.=<<___;
419         movdqa          $Xi,$Xhi                #
420         pshufd          \$0b01001110,$Xi,$T1
421         pshufd          \$0b01001110,$Hkey,$T2
422         pxor            $Xi,$T1                 #
423         pxor            $Hkey,$T2
424 ___
425 } else {
426 $code.=<<___;
427         movdqa          $Xi,$Xhi                #
428         pshufd          \$0b01001110,$Xi,$T1
429         pxor            $Xi,$T1                 #
430 ___
431 }
432 $code.=<<___;
433         pclmulqdq       \$0x00,$Hkey,$Xi        #######
434         pclmulqdq       \$0x11,$Hkey,$Xhi       #######
435         pclmulqdq       \$0x00,$HK,$T1          #######
436         pxor            $Xi,$T1                 #
437         pxor            $Xhi,$T1                #
438
439         movdqa          $T1,$T2                 #
440         psrldq          \$8,$T1
441         pslldq          \$8,$T2                 #
442         pxor            $T1,$Xhi
443         pxor            $T2,$Xi                 #
444 ___
445 }
446
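# The scheme above is plain Karatsuba: writing $Xi = Xh:Xl and $Hkey = Hh:Hl
# as 64-bit halves,
#
#	Xi*Hkey = Xh*Hh*2^128 ^ Xl*Hl ^ (Xh*Hl ^ Xl*Hh)*2^64
#	        = Xh*Hh*2^128 ^ Xl*Hl
#	          ^ ((Xh^Xl)*(Hh^Hl) ^ Xh*Hh ^ Xl*Hl)*2^64
#
# so three pclmulqdq's do the work of four.  A carry-less reference sketch,
# again on Math::BigInt, never called by this module (both helper names are
# the sketch's own):

use Math::BigInt;

sub clmul_ref {				# carry-less multiply of two BigInts
    my ($a,$b) = @_;
    my $r = Math::BigInt->bzero();
    for (my $i = 0; !$a->copy()->brsft($i)->is_zero(); $i++) {
	$r->bxor($b->copy()->blsft($i))
	    if $a->copy()->brsft($i)->band(1)->is_one();
    }
    return $r;
}

sub clmul128_karatsuba_ref {		# 128x128->256 via three 64-bit clmuls
    my ($x,$h) = @_;			# Math::BigInt operands, < 2^128
    my $m64 = Math::BigInt->from_hex("ffffffffffffffff");
    my ($xl,$xh) = ($x->copy()->band($m64), $x->copy()->brsft(64));
    my ($hl,$hh) = ($h->copy()->band($m64), $h->copy()->brsft(64));
    my $lo  = clmul_ref($xl,$hl);
    my $hi  = clmul_ref($xh,$hh);
    my $mid = clmul_ref($xl->copy()->bxor($xh), $hl->copy()->bxor($hh));
    $mid->bxor($lo)->bxor($hi);		# Karatsuba post-processing
    return $hi->copy()->blsft(128)	# same split as the $Xhi:$Xi pair above
	      ->bxor($mid->copy()->blsft(64))
	      ->bxor($lo);
}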
447 sub reduction_alg9 {    # 17/11 times faster than Intel version
448 my ($Xhi,$Xi) = @_;
449
450 $code.=<<___;
451         # 1st phase
452         movdqa          $Xi,$T2                 #
453         movdqa          $Xi,$T1
454         psllq           \$5,$Xi
455         pxor            $Xi,$T1                 #
456         psllq           \$1,$Xi
457         pxor            $T1,$Xi                 #
458         psllq           \$57,$Xi                #
459         movdqa          $Xi,$T1                 #
460         pslldq          \$8,$Xi
461         psrldq          \$8,$T1                 #       
462         pxor            $T2,$Xi
463         pxor            $T1,$Xhi                #
464
465         # 2nd phase
466         movdqa          $Xi,$T2
467         psrlq           \$1,$Xi
468         pxor            $T2,$Xhi                #
469         pxor            $Xi,$T2
470         psrlq           \$5,$Xi
471         pxor            $T2,$Xi                 #
472         psrlq           \$1,$Xi                 #
473         pxor            $Xhi,$Xi                #
474 ___
475 }
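# In textbook (non-reflected) terms, the reduction folds the 256-bit
# carry-less product $Xhi:$Xi back below 2^128 modulo g(x) =
# x^128+x^7+x^2+x+1.  A plain reference on Math::BigInt, illustrative only
# and never called (GHASH data is bit-reflected, which is why the code
# above effectively shifts left by 57/62/63 and right by 1/2/7 in its two
# phases instead):

use Math::BigInt;

sub gf128_reduce_ref {
    my ($p) = @_;			# Math::BigInt, < 2^256
    my $g = Math::BigInt->from_hex("1".("0"x30)."87");	# x^128+x^7+x^2+x+1
    for (my $i = 255; $i >= 128; $i--) {
	$p->bxor($g->copy()->blsft($i-128))
	    if $p->copy()->brsft($i)->band(1)->is_one();
    }
    return $p;				# remainder, < 2^128
}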
476 \f
477 { my ($Htbl,$Xip)=@_4args;
478   my $HK="%xmm6";
479
480 $code.=<<___;
481 .globl  gcm_init_clmul
482 .type   gcm_init_clmul,\@abi-omnipotent
483 .align  16
484 gcm_init_clmul:
485 .L_init_clmul:
486 ___
487 $code.=<<___ if ($win64);
488 .LSEH_begin_gcm_init_clmul:
489         # I can't trust assembler to use specific encoding:-(
490         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
491         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
492 ___
493 $code.=<<___;
494         movdqu          ($Xip),$Hkey
495         pshufd          \$0b01001110,$Hkey,$Hkey        # dword swap
496
497         # <<1 twist
498         pshufd          \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
499         movdqa          $Hkey,$T1
500         psllq           \$1,$Hkey
501         pxor            $T3,$T3                 #
502         psrlq           \$63,$T1
503         pcmpgtd         $T2,$T3                 # broadcast carry bit
504         pslldq          \$8,$T1
505         por             $T1,$Hkey               # H<<=1
506
507         # magic reduction
508         pand            .L0x1c2_polynomial(%rip),$T3
509         pxor            $T3,$Hkey               # if(carry) H^=0x1c2_polynomial
510
511         # calculate H^2
512         pshufd          \$0b01001110,$Hkey,$HK
513         movdqa          $Hkey,$Xi
514         pxor            $Hkey,$HK
515 ___
516         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);
517         &reduction_alg9 ($Xhi,$Xi);
518 $code.=<<___;
519         pshufd          \$0b01001110,$Hkey,$T1
520         pshufd          \$0b01001110,$Xi,$T2
521         pxor            $Hkey,$T1               # Karatsuba pre-processing
522         movdqu          $Hkey,0x00($Htbl)       # save H
523         pxor            $Xi,$T2                 # Karatsuba pre-processing
524         movdqu          $Xi,0x10($Htbl)         # save H^2
525         palignr         \$8,$T1,$T2             # low part is H.lo^H.hi...
526         movdqu          $T2,0x20($Htbl)         # save Karatsuba "salt"
527 ___
528 if ($do4xaggr) {
529         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^3
530         &reduction_alg9 ($Xhi,$Xi);
531 $code.=<<___;
532         movdqa          $Xi,$T3
533 ___
534         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^4
535         &reduction_alg9 ($Xhi,$Xi);
536 $code.=<<___;
537         pshufd          \$0b01001110,$T3,$T1
538         pshufd          \$0b01001110,$Xi,$T2
539         pxor            $T3,$T1                 # Karatsuba pre-processing
540         movdqu          $T3,0x30($Htbl)         # save H^3
541         pxor            $Xi,$T2                 # Karatsuba pre-processing
542         movdqu          $Xi,0x40($Htbl)         # save H^4
543         palignr         \$8,$T1,$T2             # low part is H^3.lo^H^3.hi...
544         movdqu          $T2,0x50($Htbl)         # save Karatsuba "salt"
545 ___
546 }
547 $code.=<<___ if ($win64);
548         movaps  (%rsp),%xmm6
549         lea     0x18(%rsp),%rsp
550 .LSEH_end_gcm_init_clmul:
551 ___
552 $code.=<<___;
553         ret
554 .size   gcm_init_clmul,.-gcm_init_clmul
555 ___
556 }
557
558 { my ($Xip,$Htbl)=@_4args;
559
560 $code.=<<___;
561 .globl  gcm_gmult_clmul
562 .type   gcm_gmult_clmul,\@abi-omnipotent
563 .align  16
564 gcm_gmult_clmul:
565 .L_gmult_clmul:
566         movdqu          ($Xip),$Xi
567         movdqa          .Lbswap_mask(%rip),$T3
568         movdqu          ($Htbl),$Hkey
569         movdqu          0x20($Htbl),$T2
570         pshufb          $T3,$Xi
571 ___
572         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$T2);
573 $code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
574         # experimental alternative. the special thing about it is that
575         # there is no dependency between the two multiplications...
576         mov             \$`0xE1<<1`,%eax
577         mov             \$0xA040608020C0E000,%r10       # ((7..0)·0xE0)&0xff
578         mov             \$0x07,%r11d
579         movq            %rax,$T1
580         movq            %r10,$T2
581         movq            %r11,$T3                # borrow $T3
582         pand            $Xi,$T3
583         pshufb          $T3,$T2                 # ($Xi&7)·0xE0
584         movq            %rax,$T3
585         pclmulqdq       \$0x00,$Xi,$T1          # ·(0xE1<<1)
586         pxor            $Xi,$T2
587         pslldq          \$15,$T2
588         paddd           $T2,$T2                 # <<(64+56+1)
589         pxor            $T2,$Xi
590         pclmulqdq       \$0x01,$T3,$Xi
591         movdqa          .Lbswap_mask(%rip),$T3  # reload $T3
592         psrldq          \$1,$T1
593         pxor            $T1,$Xhi
594         pslldq          \$7,$Xi
595         pxor            $Xhi,$Xi
596 ___
597 $code.=<<___;
598         pshufb          $T3,$Xi
599         movdqu          $Xi,($Xip)
600         ret
601 .size   gcm_gmult_clmul,.-gcm_gmult_clmul
602 ___
603 }
604 \f
605 { my ($Xip,$Htbl,$inp,$len)=@_4args;
606   my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
607   my ($T1,$T2,$T3)=map("%xmm$_",(8..10));
608
609 $code.=<<___;
610 .globl  gcm_ghash_clmul
611 .type   gcm_ghash_clmul,\@abi-omnipotent
612 .align  32
613 gcm_ghash_clmul:
614 .L_ghash_clmul:
615 ___
616 $code.=<<___ if ($win64);
617         lea     -0x88(%rsp),%rax
618 .LSEH_begin_gcm_ghash_clmul:
619         # I can't trust assembler to use specific encoding:-(
620         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
621         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
622         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
623         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
624         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
625         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
626         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
627         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
628         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
629         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
630         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
631 ___
632 $code.=<<___;
633         movdqa          .Lbswap_mask(%rip),$T3
634
635         movdqu          ($Xip),$Xi
636         movdqu          ($Htbl),$Hkey
637         movdqu          0x20($Htbl),$HK
638         pshufb          $T3,$Xi
639
640         sub             \$0x10,$len
641         jz              .Lodd_tail
642
643         movdqu          0x10($Htbl),$Hkey2
644 ___
645 if ($do4xaggr) {
646 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
647
648 $code.=<<___;
649         mov             OPENSSL_ia32cap_P+4(%rip),%eax
650         cmp             \$0x30,$len
651         jb              .Lskip4x
652
653         and             \$`1<<26|1<<22`,%eax    # isolate MOVBE+XSAVE
654         cmp             \$`1<<22`,%eax          # check for MOVBE without XSAVE
655         je              .Lskip4x
656
657         sub             \$0x30,$len
658         mov             \$0xA040608020C0E000,%rax       # ((7..0)·0xE0)&0xff
659         movdqu          0x30($Htbl),$Hkey3
660         movdqu          0x40($Htbl),$Hkey4
661
662         #######
663         # Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
664         #
665         movdqu          0x30($inp),$Xln
666          movdqu         0x20($inp),$Xl
667         pshufb          $T3,$Xln
668          pshufb         $T3,$Xl
669         movdqa          $Xln,$Xhn
670         pshufd          \$0b01001110,$Xln,$Xmn
671         pxor            $Xln,$Xmn
672         pclmulqdq       \$0x00,$Hkey,$Xln
673         pclmulqdq       \$0x11,$Hkey,$Xhn
674         pclmulqdq       \$0x00,$HK,$Xmn
675
676         movdqa          $Xl,$Xh
677         pshufd          \$0b01001110,$Xl,$Xm
678         pxor            $Xl,$Xm
679         pclmulqdq       \$0x00,$Hkey2,$Xl
680         pclmulqdq       \$0x11,$Hkey2,$Xh
681         pclmulqdq       \$0x10,$HK,$Xm
682         xorps           $Xl,$Xln
683         xorps           $Xh,$Xhn
684         movups          0x50($Htbl),$HK
685         xorps           $Xm,$Xmn
686
687         movdqu          0x10($inp),$Xl
688          movdqu         0($inp),$T1
689         pshufb          $T3,$Xl
690          pshufb         $T3,$T1
691         movdqa          $Xl,$Xh
692         pshufd          \$0b01001110,$Xl,$Xm
693          pxor           $T1,$Xi
694         pxor            $Xl,$Xm
695         pclmulqdq       \$0x00,$Hkey3,$Xl
696          movdqa         $Xi,$Xhi
697          pshufd         \$0b01001110,$Xi,$T1
698          pxor           $Xi,$T1
699         pclmulqdq       \$0x11,$Hkey3,$Xh
700         pclmulqdq       \$0x00,$HK,$Xm
701         xorps           $Xl,$Xln
702         xorps           $Xh,$Xhn
703
704         lea     0x40($inp),$inp
705         sub     \$0x40,$len
706         jc      .Ltail4x
707
708         jmp     .Lmod4_loop
709 .align  32
710 .Lmod4_loop:
711         pclmulqdq       \$0x00,$Hkey4,$Xi
712         xorps           $Xm,$Xmn
713          movdqu         0x30($inp),$Xl
714          pshufb         $T3,$Xl
715         pclmulqdq       \$0x11,$Hkey4,$Xhi
716         xorps           $Xln,$Xi
717          movdqu         0x20($inp),$Xln
718          movdqa         $Xl,$Xh
719         pclmulqdq       \$0x10,$HK,$T1
720          pshufd         \$0b01001110,$Xl,$Xm
721         xorps           $Xhn,$Xhi
722          pxor           $Xl,$Xm
723          pshufb         $T3,$Xln
724         movups          0x20($Htbl),$HK
725         xorps           $Xmn,$T1
726          pclmulqdq      \$0x00,$Hkey,$Xl
727          pshufd         \$0b01001110,$Xln,$Xmn
728
729         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
730          movdqa         $Xln,$Xhn
731         pxor            $Xhi,$T1                #
732          pxor           $Xln,$Xmn
733         movdqa          $T1,$T2                 #
734          pclmulqdq      \$0x11,$Hkey,$Xh
735         pslldq          \$8,$T1
736         psrldq          \$8,$T2                 #
737         pxor            $T1,$Xi
738         movdqa          .L7_mask(%rip),$T1
739         pxor            $T2,$Xhi                #
740         movq            %rax,$T2
741
742         pand            $Xi,$T1                 # 1st phase
743         pshufb          $T1,$T2                 #
744         pxor            $Xi,$T2                 #
745          pclmulqdq      \$0x00,$HK,$Xm
746         psllq           \$57,$T2                #
747         movdqa          $T2,$T1                 #
748         pslldq          \$8,$T2
749          pclmulqdq      \$0x00,$Hkey2,$Xln
750         psrldq          \$8,$T1                 #       
751         pxor            $T2,$Xi
752         pxor            $T1,$Xhi                #
753         movdqu          0($inp),$T1
754
755         movdqa          $Xi,$T2                 # 2nd phase
756         psrlq           \$1,$Xi
757          pclmulqdq      \$0x11,$Hkey2,$Xhn
758          xorps          $Xl,$Xln
759          movdqu         0x10($inp),$Xl
760          pshufb         $T3,$Xl
761          pclmulqdq      \$0x10,$HK,$Xmn
762          xorps          $Xh,$Xhn
763          movups         0x50($Htbl),$HK
764         pshufb          $T3,$T1
765         pxor            $T2,$Xhi                #
766         pxor            $Xi,$T2
767         psrlq           \$5,$Xi
768
769          movdqa         $Xl,$Xh
770          pxor           $Xm,$Xmn
771          pshufd         \$0b01001110,$Xl,$Xm
772         pxor            $T2,$Xi                 #
773         pxor            $T1,$Xhi
774          pxor           $Xl,$Xm
775          pclmulqdq      \$0x00,$Hkey3,$Xl
776         psrlq           \$1,$Xi                 #
777         pxor            $Xhi,$Xi                #
778         movdqa          $Xi,$Xhi
779          pclmulqdq      \$0x11,$Hkey3,$Xh
780          xorps          $Xl,$Xln
781         pshufd          \$0b01001110,$Xi,$T1
782         pxor            $Xi,$T1
783
784          pclmulqdq      \$0x00,$HK,$Xm
785          xorps          $Xh,$Xhn
786
787         lea     0x40($inp),$inp
788         sub     \$0x40,$len
789         jnc     .Lmod4_loop
790
791 .Ltail4x:
792         pclmulqdq       \$0x00,$Hkey4,$Xi
793         pclmulqdq       \$0x11,$Hkey4,$Xhi
794         pclmulqdq       \$0x10,$HK,$T1
795         xorps           $Xm,$Xmn
796         xorps           $Xln,$Xi
797         xorps           $Xhn,$Xhi
798         pxor            $Xi,$Xhi                # aggregated Karatsuba post-processing
799         pxor            $Xmn,$T1
800
801         pxor            $Xhi,$T1                #
802         pxor            $Xi,$Xhi
803
804         movdqa          $T1,$T2                 #
805         psrldq          \$8,$T1
806         pslldq          \$8,$T2                 #
807         pxor            $T1,$Xhi
808         pxor            $T2,$Xi                 #
809 ___
810         &reduction_alg9($Xhi,$Xi);
811 $code.=<<___;
812         add     \$0x40,$len
813         jz      .Ldone
814         movdqu  0x20($Htbl),$HK
815         sub     \$0x10,$len
816         jz      .Lodd_tail
817 .Lskip4x:
818 ___
819 }
820 $code.=<<___;
821         #######
822         # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
823         #       [(H*Ii+1) + (H*Xi+1)] mod P =
824         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
825         #
826         movdqu          ($inp),$T1              # Ii
827         movdqu          16($inp),$Xln           # Ii+1
828         pshufb          $T3,$T1
829         pshufb          $T3,$Xln
830         pxor            $T1,$Xi                 # Ii+Xi
831
832         movdqa          $Xln,$Xhn
833         pshufd          \$0b01001110,$Xln,$Xmn
834         pxor            $Xln,$Xmn
835         pclmulqdq       \$0x00,$Hkey,$Xln
836         pclmulqdq       \$0x11,$Hkey,$Xhn
837         pclmulqdq       \$0x00,$HK,$Xmn
838
839         lea             32($inp),$inp           # i+=2
840         nop
841         sub             \$0x20,$len
842         jbe             .Leven_tail
843         nop
844         jmp             .Lmod_loop
845
846 .align  32
847 .Lmod_loop:
848         movdqa          $Xi,$Xhi
849         movdqa          $Xmn,$T1
850         pshufd          \$0b01001110,$Xi,$Xmn   #
851         pxor            $Xi,$Xmn                #
852
853         pclmulqdq       \$0x00,$Hkey2,$Xi
854         pclmulqdq       \$0x11,$Hkey2,$Xhi
855         pclmulqdq       \$0x10,$HK,$Xmn
856
857         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
858         pxor            $Xhn,$Xhi
859           movdqu        ($inp),$T2              # Ii
860         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
861           pshufb        $T3,$T2
862           movdqu        16($inp),$Xln           # Ii+1
863
864         pxor            $Xhi,$T1
865           pxor          $T2,$Xhi                # "Ii+Xi", consume early
866         pxor            $T1,$Xmn
867          pshufb         $T3,$Xln
868         movdqa          $Xmn,$T1                #
869         psrldq          \$8,$T1
870         pslldq          \$8,$Xmn                #
871         pxor            $T1,$Xhi
872         pxor            $Xmn,$Xi                #
873
874         movdqa          $Xln,$Xhn               #
875
876           movdqa        $Xi,$T2                 # 1st phase
877           movdqa        $Xi,$T1
878           psllq         \$5,$Xi
879           pxor          $Xi,$T1                 #
880         pclmulqdq       \$0x00,$Hkey,$Xln       #######
881           psllq         \$1,$Xi
882           pxor          $T1,$Xi                 #
883           psllq         \$57,$Xi                #
884           movdqa        $Xi,$T1                 #
885           pslldq        \$8,$Xi
886           psrldq        \$8,$T1                 #       
887           pxor          $T2,$Xi
888         pshufd          \$0b01001110,$Xhn,$Xmn
889           pxor          $T1,$Xhi                #
890         pxor            $Xhn,$Xmn               #
891
892           movdqa        $Xi,$T2                 # 2nd phase
893           psrlq         \$1,$Xi
894         pclmulqdq       \$0x11,$Hkey,$Xhn       #######
895           pxor          $T2,$Xhi                #
896           pxor          $Xi,$T2
897           psrlq         \$5,$Xi
898           pxor          $T2,$Xi                 #
899         lea             32($inp),$inp
900           psrlq         \$1,$Xi                 #
901         pclmulqdq       \$0x00,$HK,$Xmn         #######
902           pxor          $Xhi,$Xi                #
903
904         sub             \$0x20,$len
905         ja              .Lmod_loop
906
907 .Leven_tail:
908          movdqa         $Xi,$Xhi
909          movdqa         $Xmn,$T1
910          pshufd         \$0b01001110,$Xi,$Xmn   #
911          pxor           $Xi,$Xmn                #
912
913         pclmulqdq       \$0x00,$Hkey2,$Xi
914         pclmulqdq       \$0x11,$Hkey2,$Xhi
915         pclmulqdq       \$0x10,$HK,$Xmn
916
917         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
918         pxor            $Xhn,$Xhi
919         pxor            $Xi,$T1
920         pxor            $Xhi,$T1
921         pxor            $T1,$Xmn
922         movdqa          $Xmn,$T1                #
923         psrldq          \$8,$T1
924         pslldq          \$8,$Xmn                #
925         pxor            $T1,$Xhi
926         pxor            $Xmn,$Xi                #
927 ___
928         &reduction_alg9 ($Xhi,$Xi);
929 $code.=<<___;
930         test            $len,$len
931         jnz             .Ldone
932
933 .Lodd_tail:
934         movdqu          ($inp),$T1              # Ii
935         pshufb          $T3,$T1
936         pxor            $T1,$Xi                 # Ii+Xi
937 ___
938         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H*(Ii+Xi)
939         &reduction_alg9 ($Xhi,$Xi);
940 $code.=<<___;
941 .Ldone:
942         pshufb          $T3,$Xi
943         movdqu          $Xi,($Xip)
944 ___
945 $code.=<<___ if ($win64);
946         movaps  (%rsp),%xmm6
947         movaps  0x10(%rsp),%xmm7
948         movaps  0x20(%rsp),%xmm8
949         movaps  0x30(%rsp),%xmm9
950         movaps  0x40(%rsp),%xmm10
951         movaps  0x50(%rsp),%xmm11
952         movaps  0x60(%rsp),%xmm12
953         movaps  0x70(%rsp),%xmm13
954         movaps  0x80(%rsp),%xmm14
955         movaps  0x90(%rsp),%xmm15
956         lea     0xa8(%rsp),%rsp
957 .LSEH_end_gcm_ghash_clmul:
958 ___
959 $code.=<<___;
960         ret
961 .size   gcm_ghash_clmul,.-gcm_ghash_clmul
962 ___
963 }
964 \f
965 $code.=<<___;
966 .globl  gcm_init_avx
967 .type   gcm_init_avx,\@abi-omnipotent
968 .align  32
969 gcm_init_avx:
970 ___
971 if ($avx) {
972 my ($Htbl,$Xip)=@_4args;
973 my $HK="%xmm6";
974
975 $code.=<<___ if ($win64);
976 .LSEH_begin_gcm_init_avx:
977         # I can't trust assembler to use specific encoding:-(
978         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
979         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
980 ___
981 $code.=<<___;
982         vzeroupper
983
984         vmovdqu         ($Xip),$Hkey
985         vpshufd         \$0b01001110,$Hkey,$Hkey        # dword swap
986
987         # <<1 twist
988         vpshufd         \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
989         vpsrlq          \$63,$Hkey,$T1
990         vpsllq          \$1,$Hkey,$Hkey
991         vpxor           $T3,$T3,$T3             #
992         vpcmpgtd        $T2,$T3,$T3             # broadcast carry bit
993         vpslldq         \$8,$T1,$T1
994         vpor            $T1,$Hkey,$Hkey         # H<<=1
995
996         # magic reduction
997         vpand           .L0x1c2_polynomial(%rip),$T3,$T3
998         vpxor           $T3,$Hkey,$Hkey         # if(carry) H^=0x1c2_polynomial
999
1000         vpunpckhqdq     $Hkey,$Hkey,$HK
1001         vmovdqa         $Hkey,$Xi
1002         vpxor           $Hkey,$HK,$HK
1003         mov             \$4,%r10                # up to H^8
1004         jmp             .Linit_start_avx
1005 ___
1006
1007 sub clmul64x64_avx {
1008 my ($Xhi,$Xi,$Hkey,$HK)=@_;
1009
1010 if (!defined($HK)) {    $HK = $T2;
1011 $code.=<<___;
1012         vpunpckhqdq     $Xi,$Xi,$T1
1013         vpunpckhqdq     $Hkey,$Hkey,$T2
1014         vpxor           $Xi,$T1,$T1             #
1015         vpxor           $Hkey,$T2,$T2
1016 ___
1017 } else {
1018 $code.=<<___;
1019         vpunpckhqdq     $Xi,$Xi,$T1
1020         vpxor           $Xi,$T1,$T1             #
1021 ___
1022 }
1023 $code.=<<___;
1024         vpclmulqdq      \$0x11,$Hkey,$Xi,$Xhi   #######
1025         vpclmulqdq      \$0x00,$Hkey,$Xi,$Xi    #######
1026         vpclmulqdq      \$0x00,$HK,$T1,$T1      #######
1027         vpxor           $Xi,$Xhi,$T2            #
1028         vpxor           $T2,$T1,$T1             #
1029
1030         vpslldq         \$8,$T1,$T2             #
1031         vpsrldq         \$8,$T1,$T1
1032         vpxor           $T2,$Xi,$Xi             #
1033         vpxor           $T1,$Xhi,$Xhi
1034 ___
1035 }
1036
1037 sub reduction_avx {
1038 my ($Xhi,$Xi) = @_;
1039
1040 $code.=<<___;
1041         vpsllq          \$57,$Xi,$T1            # 1st phase
1042         vpsllq          \$62,$Xi,$T2
1043         vpxor           $T1,$T2,$T2             #
1044         vpsllq          \$63,$Xi,$T1
1045         vpxor           $T1,$T2,$T2             #
1046         vpslldq         \$8,$T2,$T1             #
1047         vpsrldq         \$8,$T2,$T2
1048         vpxor           $T1,$Xi,$Xi             #
1049         vpxor           $T2,$Xhi,$Xhi
1050
1051         vpsrlq          \$1,$Xi,$T2             # 2nd phase
1052         vpxor           $Xi,$Xhi,$Xhi
1053         vpxor           $T2,$Xi,$Xi             #
1054         vpsrlq          \$5,$T2,$T2
1055         vpxor           $T2,$Xi,$Xi             #
1056         vpsrlq          \$1,$Xi,$Xi             #
1057         vpxor           $Xhi,$Xi,$Xi            #
1058 ___
1059 }
1060
1061 $code.=<<___;
1062 .align  32
1063 .Linit_loop_avx:
1064         vpalignr        \$8,$T1,$T2,$T3         # low part is H.lo^H.hi...
1065         vmovdqu         $T3,-0x10($Htbl)        # save Karatsuba "salt"
1066 ___
1067         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^3,5,7
1068         &reduction_avx  ($Xhi,$Xi);
1069 $code.=<<___;
1070 .Linit_start_avx:
1071         vmovdqa         $Xi,$T3
1072 ___
1073         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^2,4,6,8
1074         &reduction_avx  ($Xhi,$Xi);
1075 $code.=<<___;
1076         vpshufd         \$0b01001110,$T3,$T1
1077         vpshufd         \$0b01001110,$Xi,$T2
1078         vpxor           $T3,$T1,$T1             # Karatsuba pre-processing
1079         vmovdqu         $T3,0x00($Htbl)         # save H^1,3,5,7
1080         vpxor           $Xi,$T2,$T2             # Karatsuba pre-processing
1081         vmovdqu         $Xi,0x10($Htbl)         # save H^2,4,6,8
1082         lea             0x30($Htbl),$Htbl
1083         sub             \$1,%r10
1084         jnz             .Linit_loop_avx
1085
1086         vpalignr        \$8,$T2,$T1,$T3         # last "salt" is flipped
1087         vmovdqu         $T3,-0x10($Htbl)
1088
1089         vzeroupper
1090 ___
1091 $code.=<<___ if ($win64);
1092         movaps  (%rsp),%xmm6
1093         lea     0x18(%rsp),%rsp
1094 .LSEH_end_gcm_init_avx:
1095 ___
1096 $code.=<<___;
1097         ret
1098 .size   gcm_init_avx,.-gcm_init_avx
1099 ___
1100 } else {
1101 $code.=<<___;
1102         jmp     .L_init_clmul
1103 .size   gcm_init_avx,.-gcm_init_avx
1104 ___
1105 }
1106
1107 $code.=<<___;
1108 .globl  gcm_gmult_avx
1109 .type   gcm_gmult_avx,\@abi-omnipotent
1110 .align  32
1111 gcm_gmult_avx:
1112         jmp     .L_gmult_clmul
1113 .size   gcm_gmult_avx,.-gcm_gmult_avx
1114 ___
1115 \f
1116 $code.=<<___;
1117 .globl  gcm_ghash_avx
1118 .type   gcm_ghash_avx,\@abi-omnipotent
1119 .align  32
1120 gcm_ghash_avx:
1121 ___
1122 if ($avx) {
1123 my ($Xip,$Htbl,$inp,$len)=@_4args;
1124 my ($Xlo,$Xhi,$Xmi,
1125     $Zlo,$Zhi,$Zmi,
1126     $Hkey,$HK,$T1,$T2,
1127     $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));
1128
1129 $code.=<<___ if ($win64);
1130         lea     -0x88(%rsp),%rax
1131 .LSEH_begin_gcm_ghash_avx:
1132         # I can't trust assembler to use specific encoding:-(
1133         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1134         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
1135         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
1136         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
1137         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
1138         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
1139         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
1140         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
1141         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
1142         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
1143         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
1144 ___
1145 $code.=<<___;
1146         vzeroupper
1147
1148         vmovdqu         ($Xip),$Xi              # load $Xi
1149         lea             .L0x1c2_polynomial(%rip),%r10
1150         lea             0x40($Htbl),$Htbl       # size optimization
1151         vmovdqu         .Lbswap_mask(%rip),$bswap
1152         vpshufb         $bswap,$Xi,$Xi
1153         cmp             \$0x80,$len
1154         jb              .Lshort_avx
1155         sub             \$0x80,$len
1156
1157         vmovdqu         0x70($inp),$Ii          # I[7]
1158         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1159         vpshufb         $bswap,$Ii,$Ii
1160         vmovdqu         0x20-0x40($Htbl),$HK
1161
1162         vpunpckhqdq     $Ii,$Ii,$T2
1163          vmovdqu        0x60($inp),$Ij          # I[6]
1164         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1165         vpxor           $Ii,$T2,$T2
1166          vpshufb        $bswap,$Ij,$Ij
1167         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1168          vmovdqu        0x10-0x40($Htbl),$Hkey  # $Hkey^2
1169          vpunpckhqdq    $Ij,$Ij,$T1
1170          vmovdqu        0x50($inp),$Ii          # I[5]
1171         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1172          vpxor          $Ij,$T1,$T1
1173
1174          vpshufb        $bswap,$Ii,$Ii
1175         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1176          vpunpckhqdq    $Ii,$Ii,$T2
1177         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1178          vmovdqu        0x30-0x40($Htbl),$Hkey  # $Hkey^3
1179          vpxor          $Ii,$T2,$T2
1180          vmovdqu        0x40($inp),$Ij          # I[4]
1181         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1182          vmovdqu        0x50-0x40($Htbl),$HK
1183
1184          vpshufb        $bswap,$Ij,$Ij
1185         vpxor           $Xlo,$Zlo,$Zlo
1186         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1187         vpxor           $Xhi,$Zhi,$Zhi
1188          vpunpckhqdq    $Ij,$Ij,$T1
1189         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1190          vmovdqu        0x40-0x40($Htbl),$Hkey  # $Hkey^4
1191         vpxor           $Xmi,$Zmi,$Zmi
1192         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1193          vpxor          $Ij,$T1,$T1
1194
1195          vmovdqu        0x30($inp),$Ii          # I[3]
1196         vpxor           $Zlo,$Xlo,$Xlo
1197         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1198         vpxor           $Zhi,$Xhi,$Xhi
1199          vpshufb        $bswap,$Ii,$Ii
1200         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1201          vmovdqu        0x60-0x40($Htbl),$Hkey  # $Hkey^5
1202         vpxor           $Zmi,$Xmi,$Xmi
1203          vpunpckhqdq    $Ii,$Ii,$T2
1204         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1205          vmovdqu        0x80-0x40($Htbl),$HK
1206          vpxor          $Ii,$T2,$T2
1207
1208          vmovdqu        0x20($inp),$Ij          # I[2]
1209         vpxor           $Xlo,$Zlo,$Zlo
1210         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1211         vpxor           $Xhi,$Zhi,$Zhi
1212          vpshufb        $bswap,$Ij,$Ij
1213         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1214          vmovdqu        0x70-0x40($Htbl),$Hkey  # $Hkey^6
1215         vpxor           $Xmi,$Zmi,$Zmi
1216          vpunpckhqdq    $Ij,$Ij,$T1
1217         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1218          vpxor          $Ij,$T1,$T1
1219
1220          vmovdqu        0x10($inp),$Ii          # I[1]
1221         vpxor           $Zlo,$Xlo,$Xlo
1222         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1223         vpxor           $Zhi,$Xhi,$Xhi
1224          vpshufb        $bswap,$Ii,$Ii
1225         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1226          vmovdqu        0x90-0x40($Htbl),$Hkey  # $Hkey^7
1227         vpxor           $Zmi,$Xmi,$Xmi
1228          vpunpckhqdq    $Ii,$Ii,$T2
1229         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1230          vmovdqu        0xb0-0x40($Htbl),$HK
1231          vpxor          $Ii,$T2,$T2
1232
1233          vmovdqu        ($inp),$Ij              # I[0]
1234         vpxor           $Xlo,$Zlo,$Zlo
1235         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1236         vpxor           $Xhi,$Zhi,$Zhi
1237          vpshufb        $bswap,$Ij,$Ij
1238         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1239          vmovdqu        0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1240         vpxor           $Xmi,$Zmi,$Zmi
1241         vpclmulqdq      \$0x10,$HK,$T2,$Xmi
1242
1243         lea             0x80($inp),$inp
1244         cmp             \$0x80,$len
1245         jb              .Ltail_avx
1246
1247         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1248         sub             \$0x80,$len
1249         jmp             .Loop8x_avx
1250
1251 .align  32
1252 .Loop8x_avx:
1253         vpunpckhqdq     $Ij,$Ij,$T1
1254          vmovdqu        0x70($inp),$Ii          # I[7]
1255         vpxor           $Xlo,$Zlo,$Zlo
1256         vpxor           $Ij,$T1,$T1
1257         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xi
1258          vpshufb        $bswap,$Ii,$Ii
1259         vpxor           $Xhi,$Zhi,$Zhi
1260         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xo
1261          vmovdqu        0x00-0x40($Htbl),$Hkey  # $Hkey^1
1262          vpunpckhqdq    $Ii,$Ii,$T2
1263         vpxor           $Xmi,$Zmi,$Zmi
1264         vpclmulqdq      \$0x00,$HK,$T1,$Tred
1265          vmovdqu        0x20-0x40($Htbl),$HK
1266          vpxor          $Ii,$T2,$T2
1267
1268           vmovdqu       0x60($inp),$Ij          # I[6]
1269          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1270         vpxor           $Zlo,$Xi,$Xi            # collect result
1271           vpshufb       $bswap,$Ij,$Ij
1272          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1273         vxorps          $Zhi,$Xo,$Xo
1274           vmovdqu       0x10-0x40($Htbl),$Hkey  # $Hkey^2
1275          vpunpckhqdq    $Ij,$Ij,$T1
1276          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1277         vpxor           $Zmi,$Tred,$Tred
1278          vxorps         $Ij,$T1,$T1
1279
1280           vmovdqu       0x50($inp),$Ii          # I[5]
1281         vpxor           $Xi,$Tred,$Tred         # aggregated Karatsuba post-processing
1282          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1283         vpxor           $Xo,$Tred,$Tred
1284         vpslldq         \$8,$Tred,$T2
1285          vpxor          $Xlo,$Zlo,$Zlo
1286          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1287         vpsrldq         \$8,$Tred,$Tred
1288         vpxor           $T2, $Xi, $Xi
1289           vmovdqu       0x30-0x40($Htbl),$Hkey  # $Hkey^3
1290           vpshufb       $bswap,$Ii,$Ii
1291         vxorps          $Tred,$Xo, $Xo
1292          vpxor          $Xhi,$Zhi,$Zhi
1293          vpunpckhqdq    $Ii,$Ii,$T2
1294          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1295           vmovdqu       0x50-0x40($Htbl),$HK
1296          vpxor          $Ii,$T2,$T2
1297          vpxor          $Xmi,$Zmi,$Zmi
1298
1299           vmovdqu       0x40($inp),$Ij          # I[4]
1300         vpalignr        \$8,$Xi,$Xi,$Tred       # 1st phase
1301          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1302           vpshufb       $bswap,$Ij,$Ij
1303          vpxor          $Zlo,$Xlo,$Xlo
1304          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1305           vmovdqu       0x40-0x40($Htbl),$Hkey  # $Hkey^4
1306          vpunpckhqdq    $Ij,$Ij,$T1
1307          vpxor          $Zhi,$Xhi,$Xhi
1308          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1309          vxorps         $Ij,$T1,$T1
1310          vpxor          $Zmi,$Xmi,$Xmi
1311
1312           vmovdqu       0x30($inp),$Ii          # I[3]
1313         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1314          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1315           vpshufb       $bswap,$Ii,$Ii
1316          vpxor          $Xlo,$Zlo,$Zlo
1317          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1318           vmovdqu       0x60-0x40($Htbl),$Hkey  # $Hkey^5
1319          vpunpckhqdq    $Ii,$Ii,$T2
1320          vpxor          $Xhi,$Zhi,$Zhi
1321          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1322           vmovdqu       0x80-0x40($Htbl),$HK
1323          vpxor          $Ii,$T2,$T2
1324          vpxor          $Xmi,$Zmi,$Zmi
1325
1326           vmovdqu       0x20($inp),$Ij          # I[2]
1327          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1328           vpshufb       $bswap,$Ij,$Ij
1329          vpxor          $Zlo,$Xlo,$Xlo
1330          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1331           vmovdqu       0x70-0x40($Htbl),$Hkey  # $Hkey^6
1332          vpunpckhqdq    $Ij,$Ij,$T1
1333          vpxor          $Zhi,$Xhi,$Xhi
1334          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1335          vpxor          $Ij,$T1,$T1
1336          vpxor          $Zmi,$Xmi,$Xmi
1337         vxorps          $Tred,$Xi,$Xi
1338
1339           vmovdqu       0x10($inp),$Ii          # I[1]
1340         vpalignr        \$8,$Xi,$Xi,$Tred       # 2nd phase
1341          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1342           vpshufb       $bswap,$Ii,$Ii
1343          vpxor          $Xlo,$Zlo,$Zlo
1344          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1345           vmovdqu       0x90-0x40($Htbl),$Hkey  # $Hkey^7
1346         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1347         vxorps          $Xo,$Tred,$Tred
1348          vpunpckhqdq    $Ii,$Ii,$T2
1349          vpxor          $Xhi,$Zhi,$Zhi
1350          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1351           vmovdqu       0xb0-0x40($Htbl),$HK
1352          vpxor          $Ii,$T2,$T2
1353          vpxor          $Xmi,$Zmi,$Zmi
1354
1355           vmovdqu       ($inp),$Ij              # I[0]
1356          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1357           vpshufb       $bswap,$Ij,$Ij
1358          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1359           vmovdqu       0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1360         vpxor           $Tred,$Ij,$Ij
1361          vpclmulqdq     \$0x10,$HK,  $T2,$Xmi
1362         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1363
1364         lea             0x80($inp),$inp
1365         sub             \$0x80,$len
1366         jnc             .Loop8x_avx
1367
1368         add             \$0x80,$len
1369         jmp             .Ltail_no_xor_avx
1370
1371 .align  32
1372 .Lshort_avx:
1373         vmovdqu         -0x10($inp,$len),$Ii    # very last word
1374         lea             ($inp,$len),$inp
1375         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1376         vmovdqu         0x20-0x40($Htbl),$HK
1377         vpshufb         $bswap,$Ii,$Ij
1378
1379         vmovdqa         $Xlo,$Zlo               # subtle way to zero $Zlo,
1380         vmovdqa         $Xhi,$Zhi               # $Zhi and
1381         vmovdqa         $Xmi,$Zmi               # $Zmi
1382         sub             \$0x10,$len
1383         jz              .Ltail_avx
1384
1385         vpunpckhqdq     $Ij,$Ij,$T1
1386         vpxor           $Xlo,$Zlo,$Zlo
1387         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1388         vpxor           $Ij,$T1,$T1
1389          vmovdqu        -0x20($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x10-0x40($Htbl),$Hkey  # $Hkey^2
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vpsrldq         \$8,$HK,$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x30($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x30-0x40($Htbl),$Hkey  # $Hkey^3
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vmovdqu         0x50-0x40($Htbl),$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x40($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x40-0x40($Htbl),$Hkey  # $Hkey^4
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vpsrldq         \$8,$HK,$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x50($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x60-0x40($Htbl),$Hkey  # $Hkey^5
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vmovdqu         0x80-0x40($Htbl),$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x60($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x70-0x40($Htbl),$Hkey  # $Hkey^6
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vpsrldq         \$8,$HK,$HK
        sub             \$0x10,$len
        jz              .Ltail_avx

        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
         vmovdqu        -0x70($inp),$Ii
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vmovdqu         0x90-0x40($Htbl),$Hkey  # $Hkey^7
         vpshufb        $bswap,$Ii,$Ij
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi
        vmovq           0xb8-0x40($Htbl),$HK
        sub             \$0x10,$len
        jmp             .Ltail_avx

.align  32
.Ltail_avx:
        vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
.Ltail_no_xor_avx:
        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi

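        # Reduction constant: %r10 is expected to point at .L0x1c2_polynomial
        # (set up earlier in the function, outside this excerpt), so $Tred
        # below receives the bit-reflected GHASH reduction constant.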
        vmovdqu         (%r10),$Tred

        vpxor           $Xlo,$Zlo,$Xi
        vpxor           $Xhi,$Zhi,$Xo
        vpxor           $Xmi,$Zmi,$Zmi

        vpxor           $Xi, $Zmi,$Zmi          # aggregated Karatsuba post-processing
        vpxor           $Xo, $Zmi,$Zmi
        vpslldq         \$8, $Zmi,$T2
        vpsrldq         \$8, $Zmi,$Zmi
        vpxor           $T2, $Xi, $Xi
        vpxor           $Zmi,$Xo, $Xo

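        # The two vpclmulqdq/vpalignr/vpxor rounds below fold the 256-bit
        # result held in $Xi:$Xo back into a single 128-bit value modulo the
        # GHASH polynomial, using the constant loaded into $Tred above.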
        vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 1st phase
        vpalignr        \$8,$Xi,$Xi,$Xi
        vpxor           $T2,$Xi,$Xi

        vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 2nd phase
        vpalignr        \$8,$Xi,$Xi,$Xi
        vpxor           $Xo,$Xi,$Xi
        vpxor           $T2,$Xi,$Xi

        cmp             \$0,$len
        jne             .Lshort_avx

        vpshufb         $bswap,$Xi,$Xi
        vmovdqu         $Xi,($Xip)
        vzeroupper
___
$code.=<<___ if ($win64);
        movaps  (%rsp),%xmm6
        movaps  0x10(%rsp),%xmm7
        movaps  0x20(%rsp),%xmm8
        movaps  0x30(%rsp),%xmm9
        movaps  0x40(%rsp),%xmm10
        movaps  0x50(%rsp),%xmm11
        movaps  0x60(%rsp),%xmm12
        movaps  0x70(%rsp),%xmm13
        movaps  0x80(%rsp),%xmm14
        movaps  0x90(%rsp),%xmm15
        lea     0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_avx:
___
$code.=<<___;
        ret
.size   gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
$code.=<<___;
        jmp     .L_ghash_clmul
.size   gcm_ghash_avx,.-gcm_ghash_avx
___
}
\f
$code.=<<___;
.align  64
.Lbswap_mask:
        .byte   15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
        .byte   1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
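# .L0x1c2_polynomial is the GHASH field polynomial in the bit-reflected
# representation used by the CLMUL/AVX code paths (0xE1 shifted left by one,
# hence the 0x1c2 in the label).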
.L7_mask:
        .long   7,0,7,0
.L7_mask_poly:
        .long   7,0,`0xE1<<1`,0
.align  64
.type   .Lrem_4bit,\@object
.Lrem_4bit:
        .long   0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
        .long   0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
        .long   0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
        .long   0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
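# .Lrem_4bit: 16 pre-computed reduction values for the 4-bit path, one per
# possible nibble shifted out, each stored in the upper 16 bits of the
# second dword of an 8-byte entry.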
.type   .Lrem_8bit,\@object
.Lrem_8bit:
        .value  0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
        .value  0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
        .value  0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
        .value  0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
        .value  0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
        .value  0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
        .value  0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
        .value  0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
        .value  0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
        .value  0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
        .value  0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
        .value  0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
        .value  0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
        .value  0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
        .value  0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
        .value  0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
        .value  0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
        .value  0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
        .value  0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
        .value  0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
        .value  0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
        .value  0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
        .value  0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
        .value  0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
        .value  0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
        .value  0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
        .value  0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
        .value  0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
        .value  0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
        .value  0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
        .value  0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
        .value  0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
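# .Lrem_8bit: the analogous 256-entry table of 16-bit reduction values,
# used by the streamed "528B" GHASH path.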

.asciz  "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align  64
___
\f
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type   se_handler,\@abi-omnipotent
.align  16
se_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip

        mov     8($disp),%rsi           # disp->ImageBase
        mov     56($disp),%r11          # disp->HandlerData

        mov     0(%r11),%r10d           # HandlerData[0]
        lea     (%rsi,%r10),%r10        # prologue label
        cmp     %r10,%rbx               # context->Rip<prologue label
        jb      .Lin_prologue

        mov     152($context),%rax      # pull context->Rsp

        mov     4(%r11),%r10d           # HandlerData[1]
        lea     (%rsi,%r10),%r10        # epilogue label
        cmp     %r10,%rbx               # context->Rip>=epilogue label
        jae     .Lin_prologue
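        # Rip is inside the covered function body: reload %rbx, %rbp and %r12
        # from the three qwords at the top of the faulting function's stack
        # and report the unwound %rsp (old %rsp + 24) back in the CONTEXT.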

        lea     24(%rax),%rax           # adjust "rsp"

        mov     -8(%rax),%rbx
        mov     -16(%rax),%rbp
        mov     -24(%rax),%r12
        mov     %rbx,144($context)      # restore context->Rbx
        mov     %rbp,160($context)      # restore context->Rbp
        mov     %r12,216($context)      # restore context->R12

.Lin_prologue:
        mov     8(%rax),%rdi
        mov     16(%rax),%rsi
        mov     %rax,152($context)      # restore context->Rsp
        mov     %rsi,168($context)      # restore context->Rsi
        mov     %rdi,176($context)      # restore context->Rdi

        mov     40($disp),%rdi          # disp->ContextRecord
        mov     $context,%rsi           # context
        mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
        .long   0xa548f3fc              # cld; rep movsq

        mov     $disp,%rsi
        xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
        mov     8(%rsi),%rdx            # arg2, disp->ImageBase
        mov     0(%rsi),%r8             # arg3, disp->ControlPc
        mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
        mov     40(%rsi),%r10           # disp->ContextRecord
        lea     56(%rsi),%r11           # &disp->HandlerData
        lea     24(%rsi),%r12           # &disp->EstablisherFrame
        mov     %r10,32(%rsp)           # arg5
        mov     %r11,40(%rsp)           # arg6
        mov     %r12,48(%rsp)           # arg7
        mov     %rcx,56(%rsp)           # arg8, (NULL)
        call    *__imp_RtlVirtualUnwind(%rip)

        mov     \$1,%eax                # ExceptionContinueSearch
        add     \$64,%rsp
        popfq
        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        pop     %rbp
        pop     %rbx
        pop     %rdi
        pop     %rsi
        ret
.size   se_handler,.-se_handler

.section        .pdata
.align  4
        .rva    .LSEH_begin_gcm_gmult_4bit
        .rva    .LSEH_end_gcm_gmult_4bit
        .rva    .LSEH_info_gcm_gmult_4bit

        .rva    .LSEH_begin_gcm_ghash_4bit
        .rva    .LSEH_end_gcm_ghash_4bit
        .rva    .LSEH_info_gcm_ghash_4bit

        .rva    .LSEH_begin_gcm_init_clmul
        .rva    .LSEH_end_gcm_init_clmul
        .rva    .LSEH_info_gcm_init_clmul

        .rva    .LSEH_begin_gcm_ghash_clmul
        .rva    .LSEH_end_gcm_ghash_clmul
        .rva    .LSEH_info_gcm_ghash_clmul
___
$code.=<<___    if ($avx);
        .rva    .LSEH_begin_gcm_init_avx
        .rva    .LSEH_end_gcm_init_avx
        .rva    .LSEH_info_gcm_init_clmul

        .rva    .LSEH_begin_gcm_ghash_avx
        .rva    .LSEH_end_gcm_ghash_avx
        .rva    .LSEH_info_gcm_ghash_clmul
___
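# Note: the AVX entry points above reuse the clmul unwind-info blocks; this
# presumes their win64 prologues establish the same stack layout (0x18-byte
# and 0xa8-byte XMM save areas respectively) as the clmul routines.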
$code.=<<___;
.section        .xdata
.align  8
.LSEH_info_gcm_gmult_4bit:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lgmult_prologue,.Lgmult_epilogue       # HandlerData
.LSEH_info_gcm_ghash_4bit:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lghash_prologue,.Lghash_epilogue       # HandlerData
.LSEH_info_gcm_init_clmul:
        .byte   0x01,0x08,0x03,0x00
        .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
        .byte   0x04,0x22,0x00,0x00     #sub    rsp,0x18
.LSEH_info_gcm_ghash_clmul:
        .byte   0x01,0x33,0x16,0x00
        .byte   0x33,0xf8,0x09,0x00     #movaps 0x90(rsp),xmm15
        .byte   0x2e,0xe8,0x08,0x00     #movaps 0x80(rsp),xmm14
        .byte   0x29,0xd8,0x07,0x00     #movaps 0x70(rsp),xmm13
        .byte   0x24,0xc8,0x06,0x00     #movaps 0x60(rsp),xmm12
        .byte   0x1f,0xb8,0x05,0x00     #movaps 0x50(rsp),xmm11
        .byte   0x1a,0xa8,0x04,0x00     #movaps 0x40(rsp),xmm10
        .byte   0x15,0x98,0x03,0x00     #movaps 0x30(rsp),xmm9
        .byte   0x10,0x88,0x02,0x00     #movaps 0x20(rsp),xmm8
        .byte   0x0c,0x78,0x01,0x00     #movaps 0x10(rsp),xmm7
        .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
        .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
___
}
\f
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;