1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # March, June 2010
11 #
12 # The module implements the "4-bit" GCM GHASH function and the
13 # underlying single multiplication operation in GF(2^128). "4-bit"
14 # means that it uses a 256-byte per-key table [+128 bytes shared
15 # table]. The GHASH function features a so-called "528B" variant,
16 # which utilizes an additional 256+16 bytes of per-key storage [+512
17 # bytes shared table]. Performance results are for this streamed
18 # GHASH subroutine, expressed in cycles per processed byte; less is better:
19 #
20 #               gcc 3.4.x(*)    assembler
21 #
22 # P4            28.6            14.0            +100%
23 # Opteron       19.3            7.7             +150%
24 # Core2         17.8            8.1(**)         +120%
25 # Atom          31.6            16.8            +88%
26 # VIA Nano      21.8            10.1            +115%
27 #
28 # (*)   comparison is not completely fair, because C results are
29 #       for vanilla "256B" implementation, while assembler results
30 #       are for "528B";-)
31 # (**)  it's a mystery [to me] why the Core2 result is not the same
32 #       as the Opteron one;
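#
# For reference, the storage figures quoted above break down as
# follows. The per-key "256B" table holds 16 16-byte entries, H*i for
# nibble values i=0..15, and the 128-byte shared table is .Lrem_4bit
# at the bottom of this file (16 8-byte reduction constants). The
# streamed "528B" variant additionally builds, on the stack inside
# gcm_ghash_4bit, a 256-byte copy of the same entries pre-shifted
# right by 4 bits plus a 16-byte table of the shifted-out nibbles,
# i.e. 256+256+16=528 bytes per key; its 512-byte shared table is
# .Lrem_8bit (256 16-bit reduction constants).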
33
34 # May 2010
35 #
36 # Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
37 # See ghash-x86.pl for background information and details about coding
38 # techniques.
39 #
40 # Special thanks to David Woodhouse <dwmw2@infradead.org> for
41 # providing access to a Westmere-based system on behalf of Intel
42 # Open Source Technology Centre.
43
44 # December 2012
45 #
46 # Overhaul: aggregate Karatsuba post-processing, improve ILP in
47 # reduction_alg9, increase reduction aggregate factor to 4x. As for
48 # the latter: ghash-x86.pl argues that it makes less sense to
49 # increase the aggregate factor there. Then why increase it here?
50 # The critical path consists of 3 independent pclmulqdq instructions,
51 # Karatsuba post-processing and reduction. "On top" of this we lay
52 # down aggregated multiplication operations, triplets of independent
53 # pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
54 # less sense to aggregate more multiplications than it takes to
55 # perform the remaining non-multiplication operations. 2x is a
56 # near-optimal coefficient for contemporary Intel CPUs (hence the
57 # modest improvement), but not for Bulldozer, because its logical
58 # SIMD operations are twice as slow as Intel's, so the critical path
59 # is longer. A CPU with a higher pclmulqdq issue rate would also
60 # benefit from a higher aggregate factor...
61 #
62 # Westmere      1.76(+14%)
63 # Sandy Bridge  1.79(+9%)
64 # Ivy Bridge    1.79(+8%)
65 # Haswell       0.55(+93%) (if system doesn't support AVX)
66 # Bulldozer     1.52(+25%)
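#
# Concretely, the 4x aggregation amortizes one reduction over four
# input blocks, computing (see the .Lmod4_loop code path below)
#
#       Xi+4 = [(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
#
# so only one reduction is performed per 64 bytes of input.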
67
68 # March 2013
69 #
70 # ... the 8x-aggregate-factor AVX code path uses the reduction
71 # algorithm suggested by Shay Gueron[1]. Even though contemporary
72 # AVX-capable CPUs such as Sandy and Ivy Bridge can execute it, it
73 # performs sub-optimally there compared to the above version. But
74 # thanks to Ilya Albrekht and Max Locktyukhin of Intel Corp. we know
75 # that it performs at 0.41 cycles per byte on a Haswell processor.
76 #
77 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
78
79 $flavour = shift;
80 $output  = shift;
81 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
82
83 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
84
85 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
86 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
87 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
88 die "can't locate x86_64-xlate.pl";
89
90 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
91                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
92         $avx = ($1>=2.19) + ($1>=2.22);
93 }
94
95 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
96             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
97         $avx = ($1>=2.09) + ($1>=2.10);
98 }
99
100 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
101             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
102         $avx = ($1>=10) + ($1>=11);
103 }
104
105 open OUT,"| \"$^X\" $xlate $flavour $output";
106 *STDOUT=*OUT;
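# The script follows the usual perlasm calling convention: the first
# argument selects the assembler flavour and the optional second one
# names the output file, e.g. something like
#
#       perl ghash-x86_64.pl elf ghash-x86_64.s
#
# where nasm/masm/mingw64 flavours select Win64 conventions (see
# $win64 above); everything is piped through x86_64-xlate.pl.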
107
108 $do4xaggr=1;
109
110 # common register layout
111 $nlo="%rax";
112 $nhi="%rbx";
113 $Zlo="%r8";
114 $Zhi="%r9";
115 $tmp="%r10";
116 $rem_4bit = "%r11";
117
118 $Xi="%rdi";
119 $Htbl="%rsi";
120
121 # per-function register layout
122 $cnt="%rcx";
123 $rem="%rdx";
124
125 sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/     or
126                         $r =~ s/%[er]([sd]i)/%\1l/      or
127                         $r =~ s/%[er](bp)/%\1l/         or
128                         $r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }
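# LB() rewrites a general-purpose register name into its low-byte
# alias, e.g. %rax->%al, %rdi->%dil, %rbp->%bpl, %r10->%r10b; it is
# used wherever only the bottom 8 bits of a register are addressed.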
129
130 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
131 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
132   my $arg = pop;
133     $arg = "\$$arg" if ($arg*1 eq $arg);
134     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
135 }
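# The AUTOLOAD thunk lets the table-generation code below be written
# as perl calls with the destination operand first, 32-bit perlasm
# style: e.g. &mov($Zlo,"8($Xi)") appends "mov 8(%rdi),%r8" to $code,
# and a purely numeric last argument is emitted as an immediate by
# prefixing it with '$'.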
136 \f
137 { my $N;
138   sub loop() {
139   my $inp = shift;
140
141         $N++;
142 $code.=<<___;
143         xor     $nlo,$nlo
144         xor     $nhi,$nhi
145         mov     `&LB("$Zlo")`,`&LB("$nlo")`
146         mov     `&LB("$Zlo")`,`&LB("$nhi")`
147         shl     \$4,`&LB("$nlo")`
148         mov     \$14,$cnt
149         mov     8($Htbl,$nlo),$Zlo
150         mov     ($Htbl,$nlo),$Zhi
151         and     \$0xf0,`&LB("$nhi")`
152         mov     $Zlo,$rem
153         jmp     .Loop$N
154
155 .align  16
156 .Loop$N:
157         shr     \$4,$Zlo
158         and     \$0xf,$rem
159         mov     $Zhi,$tmp
160         mov     ($inp,$cnt),`&LB("$nlo")`
161         shr     \$4,$Zhi
162         xor     8($Htbl,$nhi),$Zlo
163         shl     \$60,$tmp
164         xor     ($Htbl,$nhi),$Zhi
165         mov     `&LB("$nlo")`,`&LB("$nhi")`
166         xor     ($rem_4bit,$rem,8),$Zhi
167         mov     $Zlo,$rem
168         shl     \$4,`&LB("$nlo")`
169         xor     $tmp,$Zlo
170         dec     $cnt
171         js      .Lbreak$N
172
173         shr     \$4,$Zlo
174         and     \$0xf,$rem
175         mov     $Zhi,$tmp
176         shr     \$4,$Zhi
177         xor     8($Htbl,$nlo),$Zlo
178         shl     \$60,$tmp
179         xor     ($Htbl,$nlo),$Zhi
180         and     \$0xf0,`&LB("$nhi")`
181         xor     ($rem_4bit,$rem,8),$Zhi
182         mov     $Zlo,$rem
183         xor     $tmp,$Zlo
184         jmp     .Loop$N
185
186 .align  16
187 .Lbreak$N:
188         shr     \$4,$Zlo
189         and     \$0xf,$rem
190         mov     $Zhi,$tmp
191         shr     \$4,$Zhi
192         xor     8($Htbl,$nlo),$Zlo
193         shl     \$60,$tmp
194         xor     ($Htbl,$nlo),$Zhi
195         and     \$0xf0,`&LB("$nhi")`
196         xor     ($rem_4bit,$rem,8),$Zhi
197         mov     $Zlo,$rem
198         xor     $tmp,$Zlo
199
200         shr     \$4,$Zlo
201         and     \$0xf,$rem
202         mov     $Zhi,$tmp
203         shr     \$4,$Zhi
204         xor     8($Htbl,$nhi),$Zlo
205         shl     \$60,$tmp
206         xor     ($Htbl,$nhi),$Zhi
207         xor     $tmp,$Zlo
208         xor     ($rem_4bit,$rem,8),$Zhi
209
210         bswap   $Zlo
211         bswap   $Zhi
212 ___
213 }}
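# The loop above walks $Xi from its last byte towards its first, one
# nibble at a time: each step folds Z = (Z>>4) ^ Htbl[nibble] and
# fixes up the four bits shifted off the bottom of Z with the
# .Lrem_4bit constants, and the final bswap pair converts the
# accumulated 128-bit result back to big-endian byte order.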
214
215 $code=<<___;
216 .text
217
218 .globl  gcm_gmult_4bit
219 .type   gcm_gmult_4bit,\@function,2
220 .align  16
221 gcm_gmult_4bit:
222         push    %rbx
223         push    %rbp            # %rbp and %r12 are pushed exclusively in
224         push    %r12            # order to reuse Win64 exception handler...
225 .Lgmult_prologue:
226
227         movzb   15($Xi),$Zlo
228         lea     .Lrem_4bit(%rip),$rem_4bit
229 ___
230         &loop   ($Xi);
231 $code.=<<___;
232         mov     $Zlo,8($Xi)
233         mov     $Zhi,($Xi)
234
235         mov     16(%rsp),%rbx
236         lea     24(%rsp),%rsp
237 .Lgmult_epilogue:
238         ret
239 .size   gcm_gmult_4bit,.-gcm_gmult_4bit
240 ___
241 \f
242 # per-function register layout
243 $inp="%rdx";
244 $len="%rcx";
245 $rem_8bit=$rem_4bit;
246
247 $code.=<<___;
248 .globl  gcm_ghash_4bit
249 .type   gcm_ghash_4bit,\@function,4
250 .align  16
251 gcm_ghash_4bit:
252         push    %rbx
253         push    %rbp
254         push    %r12
255         push    %r13
256         push    %r14
257         push    %r15
258         sub     \$280,%rsp
259 .Lghash_prologue:
260         mov     $inp,%r14               # reassign couple of args
261         mov     $len,%r15
262 ___
263 { my $inp="%r14";
264   my $dat="%edx";
265   my $len="%r15";
266   my @nhi=("%ebx","%ecx");
267   my @rem=("%r12","%r13");
268   my $Hshr4="%rbp";
269
270         &sub    ($Htbl,-128);           # size optimization
271         &lea    ($Hshr4,"16+128(%rsp)");
272         { my @lo =($nlo,$nhi);
273           my @hi =($Zlo,$Zhi);
274
275           &xor  ($dat,$dat);
276           for ($i=0,$j=-2;$i<18;$i++,$j++) {
277             &mov        ("$j(%rsp)",&LB($dat))          if ($i>1);
278             &or         ($lo[0],$tmp)                   if ($i>1);
279             &mov        (&LB($dat),&LB($lo[1]))         if ($i>0 && $i<17);
280             &shr        ($lo[1],4)                      if ($i>0 && $i<17);
281             &mov        ($tmp,$hi[1])                   if ($i>0 && $i<17);
282             &shr        ($hi[1],4)                      if ($i>0 && $i<17);
283             &mov        ("8*$j($Hshr4)",$hi[0])         if ($i>1);
284             &mov        ($hi[0],"16*$i+0-128($Htbl)")   if ($i<16);
285             &shl        (&LB($dat),4)                   if ($i>0 && $i<17);
286             &mov        ("8*$j-128($Hshr4)",$lo[0])     if ($i>1);
287             &mov        ($lo[0],"16*$i+8-128($Htbl)")   if ($i<16);
288             &shl        ($tmp,60)                       if ($i>0 && $i<17);
289
290             push        (@lo,shift(@lo));
291             push        (@hi,shift(@hi));
292           }
293         }
294         &add    ($Htbl,-128);
295         &mov    ($Zlo,"8($Xi)");
296         &mov    ($Zhi,"0($Xi)");
297         &add    ($len,$inp);            # pointer to the end of data
298         &lea    ($rem_8bit,".Lrem_8bit(%rip)");
299         &jmp    (".Louter_loop");
300
301 $code.=".align  16\n.Louter_loop:\n";
302         &xor    ($Zhi,"($inp)");
303         &mov    ("%rdx","8($inp)");
304         &lea    ($inp,"16($inp)");
305         &xor    ("%rdx",$Zlo);
306         &mov    ("($Xi)",$Zhi);
307         &mov    ("8($Xi)","%rdx");
308         &shr    ("%rdx",32);
309
310         &xor    ($nlo,$nlo);
311         &rol    ($dat,8);
312         &mov    (&LB($nlo),&LB($dat));
313         &movz   ($nhi[0],&LB($dat));
314         &shl    (&LB($nlo),4);
315         &shr    ($nhi[0],4);
316
317         for ($j=11,$i=0;$i<15;$i++) {
318             &rol        ($dat,8);
319             &xor        ($Zlo,"8($Htbl,$nlo)")                  if ($i>0);
320             &xor        ($Zhi,"($Htbl,$nlo)")                   if ($i>0);
321             &mov        ($Zlo,"8($Htbl,$nlo)")                  if ($i==0);
322             &mov        ($Zhi,"($Htbl,$nlo)")                   if ($i==0);
323
324             &mov        (&LB($nlo),&LB($dat));
325             &xor        ($Zlo,$tmp)                             if ($i>0);
326             &movzw      ($rem[1],"($rem_8bit,$rem[1],2)")       if ($i>0);
327
328             &movz       ($nhi[1],&LB($dat));
329             &shl        (&LB($nlo),4);
330             &movzb      ($rem[0],"(%rsp,$nhi[0])");
331
332             &shr        ($nhi[1],4)                             if ($i<14);
333             &and        ($nhi[1],0xf0)                          if ($i==14);
334             &shl        ($rem[1],48)                            if ($i>0);
335             &xor        ($rem[0],$Zlo);
336
337             &mov        ($tmp,$Zhi);
338             &xor        ($Zhi,$rem[1])                          if ($i>0);
339             &shr        ($Zlo,8);
340
341             &movz       ($rem[0],&LB($rem[0]));
342             &mov        ($dat,"$j($Xi)")                        if (--$j%4==0);
343             &shr        ($Zhi,8);
344
345             &xor        ($Zlo,"-128($Hshr4,$nhi[0],8)");
346             &shl        ($tmp,56);
347             &xor        ($Zhi,"($Hshr4,$nhi[0],8)");
348
349             unshift     (@nhi,pop(@nhi));               # "rotate" registers
350             unshift     (@rem,pop(@rem));
351         }
352         &movzw  ($rem[1],"($rem_8bit,$rem[1],2)");
353         &xor    ($Zlo,"8($Htbl,$nlo)");
354         &xor    ($Zhi,"($Htbl,$nlo)");
355
356         &shl    ($rem[1],48);
357         &xor    ($Zlo,$tmp);
358
359         &xor    ($Zhi,$rem[1]);
360         &movz   ($rem[0],&LB($Zlo));
361         &shr    ($Zlo,4);
362
363         &mov    ($tmp,$Zhi);
364         &shl    (&LB($rem[0]),4);
365         &shr    ($Zhi,4);
366
367         &xor    ($Zlo,"8($Htbl,$nhi[0])");
368         &movzw  ($rem[0],"($rem_8bit,$rem[0],2)");
369         &shl    ($tmp,60);
370
371         &xor    ($Zhi,"($Htbl,$nhi[0])");
372         &xor    ($Zlo,$tmp);
373         &shl    ($rem[0],48);
374
375         &bswap  ($Zlo);
376         &xor    ($Zhi,$rem[0]);
377
378         &bswap  ($Zhi);
379         &cmp    ($inp,$len);
380         &jb     (".Louter_loop");
381 }
382 $code.=<<___;
383         mov     $Zlo,8($Xi)
384         mov     $Zhi,($Xi)
385
386         lea     280(%rsp),%rsi
387         mov     0(%rsi),%r15
388         mov     8(%rsi),%r14
389         mov     16(%rsi),%r13
390         mov     24(%rsi),%r12
391         mov     32(%rsi),%rbp
392         mov     40(%rsi),%rbx
393         lea     48(%rsi),%rsp
394 .Lghash_epilogue:
395         ret
396 .size   gcm_ghash_4bit,.-gcm_ghash_4bit
397 ___
398 \f
399 ######################################################################
400 # PCLMULQDQ version.
401
402 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
403                 ("%rdi","%rsi","%rdx","%rcx");  # Unix order
404
405 ($Xi,$Xhi)=("%xmm0","%xmm1");   $Hkey="%xmm2";
406 ($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
407
408 sub clmul64x64_T2 {     # minimal register pressure
409 my ($Xhi,$Xi,$Hkey,$HK)=@_;
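# Computes the 256-bit carry-less product $Xhi:$Xi = $Xi x $Hkey with
# three pclmulqdq's via Karatsuba: lo = Xi.lo*H.lo, hi = Xi.hi*H.hi,
# mid = (Xi.lo^Xi.hi)*(H.lo^H.hi) ^ lo ^ hi, with the middle term then
# split across the 64-bit halves of the result. When $HK is supplied
# it is expected to hold the precomputed H.lo^H.hi, saving a
# pshufd+pxor pair per invocation.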
410
411 if (!defined($HK)) {    $HK = $T2;
412 $code.=<<___;
413         movdqa          $Xi,$Xhi                #
414         pshufd          \$0b01001110,$Xi,$T1
415         pshufd          \$0b01001110,$Hkey,$T2
416         pxor            $Xi,$T1                 #
417         pxor            $Hkey,$T2
418 ___
419 } else {
420 $code.=<<___;
421         movdqa          $Xi,$Xhi                #
422         pshufd          \$0b01001110,$Xi,$T1
423         pxor            $Xi,$T1                 #
424 ___
425 }
426 $code.=<<___;
427         pclmulqdq       \$0x00,$Hkey,$Xi        #######
428         pclmulqdq       \$0x11,$Hkey,$Xhi       #######
429         pclmulqdq       \$0x00,$HK,$T1          #######
430         pxor            $Xi,$T1                 #
431         pxor            $Xhi,$T1                #
432
433         movdqa          $T1,$T2                 #
434         psrldq          \$8,$T1
435         pslldq          \$8,$T2                 #
436         pxor            $T1,$Xhi
437         pxor            $T2,$Xi                 #
438 ___
439 }
440
441 sub reduction_alg9 {    # 17/11 times faster than Intel version
442 my ($Xhi,$Xi) = @_;
443
444 $code.=<<___;
445         # 1st phase
446         movdqa          $Xi,$T2                 #
447         movdqa          $Xi,$T1
448         psllq           \$5,$Xi
449         pxor            $Xi,$T1                 #
450         psllq           \$1,$Xi
451         pxor            $T1,$Xi                 #
452         psllq           \$57,$Xi                #
453         movdqa          $Xi,$T1                 #
454         pslldq          \$8,$Xi
455         psrldq          \$8,$T1                 #       
456         pxor            $T2,$Xi
457         pxor            $T1,$Xhi                #
458
459         # 2nd phase
460         movdqa          $Xi,$T2
461         psrlq           \$1,$Xi
462         pxor            $T2,$Xhi                #
463         pxor            $Xi,$T2
464         psrlq           \$5,$Xi
465         pxor            $T2,$Xi                 #
466         psrlq           \$1,$Xi                 #
467         pxor            $Xhi,$Xi                #
468 ___
469 }
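# reduction_alg9 folds the 256-bit product $Xhi:$Xi back to 128 bits
# modulo the GHASH polynomial x^128+x^7+x^2+x+1 in its bit-reflected
# representation: the 1st phase effectively multiplies each 64-bit
# lane by x^57+x^62+x^63 (note 57=64-7, 62=64-2, 63=64-1) and folds
# the result across the 128-bit boundary, while the 2nd phase
# accumulates X, X>>1, X>>2 and X>>7, the reflected counterparts of
# the polynomial's 1, x, x^2 and x^7 terms.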
470 \f
471 { my ($Htbl,$Xip)=@_4args;
472   my $HK="%xmm6";
473
474 $code.=<<___;
475 .globl  gcm_init_clmul
476 .type   gcm_init_clmul,\@abi-omnipotent
477 .align  16
478 gcm_init_clmul:
479 .L_init_clmul:
480 ___
481 $code.=<<___ if ($win64);
482 .LSEH_begin_gcm_init_clmul:
483         # I can't trust assembler to use specific encoding:-(
484         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
485         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
486 ___
487 $code.=<<___;
488         movdqu          ($Xip),$Hkey
489         pshufd          \$0b01001110,$Hkey,$Hkey        # dword swap
490
491         # <<1 twist
492         pshufd          \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
493         movdqa          $Hkey,$T1
494         psllq           \$1,$Hkey
495         pxor            $T3,$T3                 #
496         psrlq           \$63,$T1
497         pcmpgtd         $T2,$T3                 # broadcast carry bit
498         pslldq          \$8,$T1
499         por             $T1,$Hkey               # H<<=1
500
501         # magic reduction
502         pand            .L0x1c2_polynomial(%rip),$T3
503         pxor            $T3,$Hkey               # if(carry) H^=0x1c2_polynomial
504
505         # calculate H^2
506         pshufd          \$0b01001110,$Hkey,$HK
507         movdqa          $Hkey,$Xi
508         pxor            $Hkey,$HK
509 ___
510         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);
511         &reduction_alg9 ($Xhi,$Xi);
512 $code.=<<___;
513         pshufd          \$0b01001110,$Hkey,$T1
514         pshufd          \$0b01001110,$Xi,$T2
515         pxor            $Hkey,$T1               # Karatsuba pre-processing
516         movdqu          $Hkey,0x00($Htbl)       # save H
517         pxor            $Xi,$T2                 # Karatsuba pre-processing
518         movdqu          $Xi,0x10($Htbl)         # save H^2
519         palignr         \$8,$T1,$T2             # low part is H.lo^H.hi...
520         movdqu          $T2,0x20($Htbl)         # save Karatsuba "salt"
521 ___
522 if ($do4xaggr) {
523         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^3
524         &reduction_alg9 ($Xhi,$Xi);
525 $code.=<<___;
526         movdqa          $Xi,$T3
527 ___
528         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H^4
529         &reduction_alg9 ($Xhi,$Xi);
530 $code.=<<___;
531         pshufd          \$0b01001110,$T3,$T1
532         pshufd          \$0b01001110,$Xi,$T2
533         pxor            $T3,$T1                 # Karatsuba pre-processing
534         movdqu          $T3,0x30($Htbl)         # save H^3
535         pxor            $Xi,$T2                 # Karatsuba pre-processing
536         movdqu          $Xi,0x40($Htbl)         # save H^4
537         palignr         \$8,$T1,$T2             # low part is H^3.lo^H^3.hi...
538         movdqu          $T2,0x50($Htbl)         # save Karatsuba "salt"
539 ___
540 }
541 $code.=<<___ if ($win64);
542         movaps  (%rsp),%xmm6
543         lea     0x18(%rsp),%rsp
544 .LSEH_end_gcm_init_clmul:
545 ___
546 $code.=<<___;
547         ret
548 .size   gcm_init_clmul,.-gcm_init_clmul
549 ___
550 }
551
552 { my ($Xip,$Htbl)=@_4args;
553
554 $code.=<<___;
555 .globl  gcm_gmult_clmul
556 .type   gcm_gmult_clmul,\@abi-omnipotent
557 .align  16
558 gcm_gmult_clmul:
559 .L_gmult_clmul:
560         movdqu          ($Xip),$Xi
561         movdqa          .Lbswap_mask(%rip),$T3
562         movdqu          ($Htbl),$Hkey
563         movdqu          0x20($Htbl),$T2
564         pshufb          $T3,$Xi
565 ___
566         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$T2);
567 $code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
568         # experimental alternative. special thing about it is that there
569         # is no dependency between the two multiplications...
570         mov             \$`0xE1<<1`,%eax
571         mov             \$0xA040608020C0E000,%r10       # ((7..0)·0xE0)&0xff
572         mov             \$0x07,%r11d
573         movq            %rax,$T1
574         movq            %r10,$T2
575         movq            %r11,$T3                # borrow $T3
576         pand            $Xi,$T3
577         pshufb          $T3,$T2                 # ($Xi&7)·0xE0
578         movq            %rax,$T3
579         pclmulqdq       \$0x00,$Xi,$T1          # ·(0xE1<<1)
580         pxor            $Xi,$T2
581         pslldq          \$15,$T2
582         paddd           $T2,$T2                 # <<(64+56+1)
583         pxor            $T2,$Xi
584         pclmulqdq       \$0x01,$T3,$Xi
585         movdqa          .Lbswap_mask(%rip),$T3  # reload $T3
586         psrldq          \$1,$T1
587         pxor            $T1,$Xhi
588         pslldq          \$7,$Xi
589         pxor            $Xhi,$Xi
590 ___
591 $code.=<<___;
592         pshufb          $T3,$Xi
593         movdqu          $Xi,($Xip)
594         ret
595 .size   gcm_gmult_clmul,.-gcm_gmult_clmul
596 ___
597 }
598 \f
599 { my ($Xip,$Htbl,$inp,$len)=@_4args;
600   my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(6..10));
601
602 $code.=<<___;
603 .globl  gcm_ghash_clmul
604 .type   gcm_ghash_clmul,\@abi-omnipotent
605 .align  32
606 gcm_ghash_clmul:
607 .L_ghash_clmul:
608 ___
609 $code.=<<___ if ($win64);
610         lea     -0x88(%rsp),%rax
611 .LSEH_begin_gcm_ghash_clmul:
612         # I can't trust assembler to use specific encoding:-(
613         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
614         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
615         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
616         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
617         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
618         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
619         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
620         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
621         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
622         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
623         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
624 ___
625 $code.=<<___;
626         movdqa          .Lbswap_mask(%rip),$T3
627         mov             \$0xA040608020C0E000,%rax       # ((7..0)·0xE0)&0xff
628
629         movdqu          ($Xip),$Xi
630         movdqu          ($Htbl),$Hkey
631         movdqu          0x20($Htbl),$HK
632         pshufb          $T3,$Xi
633
634         sub             \$0x10,$len
635         jz              .Lodd_tail
636
637         movdqu          0x10($Htbl),$Hkey2
638 ___
639 if ($do4xaggr) {
640 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
641
642 $code.=<<___;
643         cmp             \$0x30,$len
644         jb              .Lskip4x
645
646         sub             \$0x30,$len
647         movdqu          0x30($Htbl),$Hkey3
648         movdqu          0x40($Htbl),$Hkey4
649
650         #######
651         # Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
652         #
653         movdqu          0x30($inp),$Xln
654          movdqu         0x20($inp),$Xl
655         pshufb          $T3,$Xln
656          pshufb         $T3,$Xl
657         movdqa          $Xln,$Xhn
658         pshufd          \$0b01001110,$Xln,$Xmn
659         pxor            $Xln,$Xmn
660         pclmulqdq       \$0x00,$Hkey,$Xln
661         pclmulqdq       \$0x11,$Hkey,$Xhn
662         pclmulqdq       \$0x00,$HK,$Xmn
663
664         movdqa          $Xl,$Xh
665         pshufd          \$0b01001110,$Xl,$Xm
666         pxor            $Xl,$Xm
667         pclmulqdq       \$0x00,$Hkey2,$Xl
668         pclmulqdq       \$0x11,$Hkey2,$Xh
669         xorps           $Xl,$Xln
670         pclmulqdq       \$0x10,$HK,$Xm
671         xorps           $Xh,$Xhn
672         movups          0x50($Htbl),$HK
673         xorps           $Xm,$Xmn
674
675         movdqu          0x10($inp),$Xl
676          movdqu         0($inp),$T1
677         pshufb          $T3,$Xl
678          pshufb         $T3,$T1
679         movdqa          $Xl,$Xh
680         pshufd          \$0b01001110,$Xl,$Xm
681          pxor           $T1,$Xi
682         pxor            $Xl,$Xm
683         pclmulqdq       \$0x00,$Hkey3,$Xl
684          movdqa         $Xi,$Xhi
685          pshufd         \$0b01001110,$Xi,$T1
686          pxor           $Xi,$T1
687         pclmulqdq       \$0x11,$Hkey3,$Xh
688         xorps           $Xl,$Xln
689         pclmulqdq       \$0x00,$HK,$Xm
690         xorps           $Xh,$Xhn
691
692         lea     0x40($inp),$inp
693         sub     \$0x40,$len
694         jc      .Ltail4x
695
696         jmp     .Lmod4_loop
697 .align  32
698 .Lmod4_loop:
699         pclmulqdq       \$0x00,$Hkey4,$Xi
700         xorps           $Xm,$Xmn
701          movdqu         0x30($inp),$Xl
702          pshufb         $T3,$Xl
703         pclmulqdq       \$0x11,$Hkey4,$Xhi
704         xorps           $Xln,$Xi
705          movdqu         0x20($inp),$Xln
706          movdqa         $Xl,$Xh
707          pshufd         \$0b01001110,$Xl,$Xm
708         pclmulqdq       \$0x10,$HK,$T1
709         xorps           $Xhn,$Xhi
710          pxor           $Xl,$Xm
711          pshufb         $T3,$Xln
712         movups          0x20($Htbl),$HK
713          pclmulqdq      \$0x00,$Hkey,$Xl
714         xorps           $Xmn,$T1
715          movdqa         $Xln,$Xhn
716          pshufd         \$0b01001110,$Xln,$Xmn
717
718         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
719          pxor           $Xln,$Xmn
720         pxor            $Xhi,$T1                #
721         movdqa          $T1,$T2                 #
722         pslldq          \$8,$T1
723          pclmulqdq      \$0x11,$Hkey,$Xh
724         psrldq          \$8,$T2                 #
725         pxor            $T1,$Xi
726         movdqa          .L7_mask(%rip),$T1
727         pxor            $T2,$Xhi                #
728         movq            %rax,$T2
729
730         pand            $Xi,$T1                 # 1st phase
731         pshufb          $T1,$T2                 #
732          pclmulqdq      \$0x00,$HK,$Xm
733         pxor            $Xi,$T2                 #
734         psllq           \$57,$T2                #
735         movdqa          $T2,$T1                 #
736         pslldq          \$8,$T2
737          pclmulqdq      \$0x00,$Hkey2,$Xln
738         psrldq          \$8,$T1                 #       
739         pxor            $T2,$Xi
740         pxor            $T1,$Xhi                #
741         movdqu          0($inp),$T1
742
743         movdqa          $Xi,$T2                 # 2nd phase
744         psrlq           \$1,$Xi
745          pclmulqdq      \$0x11,$Hkey2,$Xhn
746          xorps          $Xl,$Xln
747          movdqu         0x10($inp),$Xl
748          pshufb         $T3,$Xl
749          pclmulqdq      \$0x10,$HK,$Xmn
750          xorps          $Xh,$Xhn
751          movups         0x50($Htbl),$HK
752         pshufb          $T3,$T1
753         pxor            $T2,$Xhi                #
754         pxor            $Xi,$T2
755         psrlq           \$5,$Xi
756
757          movdqa         $Xl,$Xh
758          pxor           $Xm,$Xmn
759          pshufd         \$0b01001110,$Xl,$Xm
760          pxor           $Xl,$Xm
761          pclmulqdq      \$0x00,$Hkey3,$Xl
762         pxor            $T2,$Xi                 #
763         pxor            $T1,$Xhi
764         psrlq           \$1,$Xi                 #
765          pclmulqdq      \$0x11,$Hkey3,$Xh
766          xorps          $Xl,$Xln
767         pxor            $Xhi,$Xi                #
768
769          pclmulqdq      \$0x00,$HK,$Xm
770          xorps          $Xh,$Xhn
771
772         movdqa          $Xi,$Xhi
773         pshufd          \$0b01001110,$Xi,$T1
774         pxor            $Xi,$T1
775
776         lea     0x40($inp),$inp
777         sub     \$0x40,$len
778         jnc     .Lmod4_loop
779
780 .Ltail4x:
781         pclmulqdq       \$0x00,$Hkey4,$Xi
782         xorps           $Xm,$Xmn
783         pclmulqdq       \$0x11,$Hkey4,$Xhi
784         xorps           $Xln,$Xi
785         pclmulqdq       \$0x10,$HK,$T1
786         xorps           $Xhn,$Xhi
787         pxor            $Xi,$Xhi                # aggregated Karatsuba post-processing
788         pxor            $Xmn,$T1
789
790         pxor            $Xhi,$T1                #
791         pxor            $Xi,$Xhi
792
793         movdqa          $T1,$T2                 #
794         psrldq          \$8,$T1
795         pslldq          \$8,$T2                 #
796         pxor            $T1,$Xhi
797         pxor            $T2,$Xi                 #
798 ___
799         &reduction_alg9($Xhi,$Xi);
800 $code.=<<___;
801         add     \$0x40,$len
802         jz      .Ldone
803         movdqu  0x20($Htbl),$HK
804         sub     \$0x10,$len
805         jz      .Lodd_tail
806 .Lskip4x:
807 ___
808 }
809 $code.=<<___;
810         #######
811         # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
812         #       [(H*Ii+1) + (H*Xi+1)] mod P =
813         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
814         #
815         movdqu          ($inp),$T1              # Ii
816         movdqu          16($inp),$Xln           # Ii+1
817         pshufb          $T3,$T1
818         pshufb          $T3,$Xln
819         pxor            $T1,$Xi                 # Ii+Xi
820
821         movdqa          $Xln,$Xhn
822         pshufd          \$0b01001110,$Xln,$T1
823         pxor            $Xln,$T1
824         pclmulqdq       \$0x00,$Hkey,$Xln
825         pclmulqdq       \$0x11,$Hkey,$Xhn
826         pclmulqdq       \$0x00,$HK,$T1
827
828         lea             32($inp),$inp           # i+=2
829         sub             \$0x20,$len
830         jbe             .Leven_tail
831         jmp             .Lmod_loop
832
833 .align  32
834 .Lmod_loop:
835         movdqa          $Xi,$Xhi
836         pshufd          \$0b01001110,$Xi,$T2    #
837         pxor            $Xi,$T2                 #
838
839         pclmulqdq       \$0x00,$Hkey2,$Xi
840         pclmulqdq       \$0x11,$Hkey2,$Xhi
841         pclmulqdq       \$0x10,$HK,$T2
842
843         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
844         pxor            $Xhn,$Xhi
845           movdqu        ($inp),$Xhn             # Ii
846           pshufb        $T3,$Xhn
847           movdqu        16($inp),$Xln           # Ii+1
848
849         pxor            $Xi,$T1                 # aggregated Karatsuba post-processing
850         pxor            $Xhi,$T1
851           pxor          $Xhn,$Xhi               # "Ii+Xi", consume early
852         pxor            $T1,$T2
853          pshufb         $T3,$Xln
854         movdqa          $T2,$T1                 #
855         psrldq          \$8,$T1
856         pslldq          \$8,$T2                 #
857         pxor            $T1,$Xhi
858         pxor            $T2,$Xi                 #
859
860         movdqa          $Xln,$Xhn               #
861
862           movdqa        $Xi,$T2                 # 1st phase
863           movdqa        $Xi,$T1
864           psllq         \$5,$Xi
865         pclmulqdq       \$0x00,$Hkey,$Xln       #######
866           pxor          $Xi,$T1                 #
867           psllq         \$1,$Xi
868           pxor          $T1,$Xi                 #
869           psllq         \$57,$Xi                #
870           movdqa        $Xi,$T1                 #
871           pslldq        \$8,$Xi
872           psrldq        \$8,$T1                 #       
873           pxor          $T2,$Xi
874           pxor          $T1,$Xhi                #
875         pshufd          \$0b01001110,$Xhn,$T1
876         pxor            $Xhn,$T1                #
877
878         pclmulqdq       \$0x11,$Hkey,$Xhn       #######
879           movdqa        $Xi,$T2                 # 2nd phase
880           psrlq         \$1,$Xi
881           pxor          $T2,$Xhi                #
882           pxor          $Xi,$T2
883           psrlq         \$5,$Xi
884           pxor          $T2,$Xi                 #
885           psrlq         \$1,$Xi                 #
886         pclmulqdq       \$0x00,$HK,$T1          #######
887           pxor          $Xhi,$Xi                #
888
889         lea             32($inp),$inp
890         sub             \$0x20,$len
891         ja              .Lmod_loop
892
893 .Leven_tail:
894          movdqa         $Xi,$Xhi
895          pshufd         \$0b01001110,$Xi,$T2    #
896          pxor           $Xi,$T2                 #
897
898         pclmulqdq       \$0x00,$Hkey2,$Xi
899         pclmulqdq       \$0x11,$Hkey2,$Xhi
900         pclmulqdq       \$0x10,$HK,$T2
901
902         pxor            $Xln,$Xi                # (H*Ii+1) + H^2*(Ii+Xi)
903         pxor            $Xhn,$Xhi
904         pxor            $Xi,$T1
905         pxor            $Xhi,$T1
906         pxor            $T1,$T2
907         movdqa          $T2,$T1                 #
908         psrldq          \$8,$T1
909         pslldq          \$8,$T2                 #
910         pxor            $T1,$Xhi
911         pxor            $T2,$Xi                 #
912 ___
913         &reduction_alg9 ($Xhi,$Xi);
914 $code.=<<___;
915         test            $len,$len
916         jnz             .Ldone
917
918 .Lodd_tail:
919         movdqu          ($inp),$T1              # Ii
920         pshufb          $T3,$T1
921         pxor            $T1,$Xi                 # Ii+Xi
922 ___
923         &clmul64x64_T2  ($Xhi,$Xi,$Hkey,$HK);   # H*(Ii+Xi)
924         &reduction_alg9 ($Xhi,$Xi);
925 $code.=<<___;
926 .Ldone:
927         pshufb          $T3,$Xi
928         movdqu          $Xi,($Xip)
929 ___
930 $code.=<<___ if ($win64);
931         movaps  (%rsp),%xmm6
932         movaps  0x10(%rsp),%xmm7
933         movaps  0x20(%rsp),%xmm8
934         movaps  0x30(%rsp),%xmm9
935         movaps  0x40(%rsp),%xmm10
936         movaps  0x50(%rsp),%xmm11
937         movaps  0x60(%rsp),%xmm12
938         movaps  0x70(%rsp),%xmm13
939         movaps  0x80(%rsp),%xmm14
940         movaps  0x90(%rsp),%xmm15
941         lea     0xa8(%rsp),%rsp
942 .LSEH_end_gcm_ghash_clmul:
943 ___
944 $code.=<<___;
945         ret
946 .size   gcm_ghash_clmul,.-gcm_ghash_clmul
947 ___
948 }
949 \f
950 $code.=<<___;
951 .globl  gcm_init_avx
952 .type   gcm_init_avx,\@abi-omnipotent
953 .align  32
954 gcm_init_avx:
955 ___
956 if ($avx) {
957 my ($Htbl,$Xip)=@_4args;
958 my $HK="%xmm6";
959
960 $code.=<<___ if ($win64);
961 .LSEH_begin_gcm_init_avx:
962         # I can't trust assembler to use specific encoding:-(
963         .byte   0x48,0x83,0xec,0x18             #sub    $0x18,%rsp
964         .byte   0x0f,0x29,0x34,0x24             #movaps %xmm6,(%rsp)
965 ___
966 $code.=<<___;
967         vzeroupper
968
969         vmovdqu         ($Xip),$Hkey
970         vpshufd         \$0b01001110,$Hkey,$Hkey        # dword swap
971
972         # <<1 twist
973         vpshufd         \$0b11111111,$Hkey,$T2  # broadcast uppermost dword
974         vpsrlq          \$63,$Hkey,$T1
975         vpsllq          \$1,$Hkey,$Hkey
976         vpxor           $T3,$T3,$T3             #
977         vpcmpgtd        $T2,$T3,$T3             # broadcast carry bit
978         vpslldq         \$8,$T1,$T1
979         vpor            $T1,$Hkey,$Hkey         # H<<=1
980
981         # magic reduction
982         vpand           .L0x1c2_polynomial(%rip),$T3,$T3
983         vpxor           $T3,$Hkey,$Hkey         # if(carry) H^=0x1c2_polynomial
984
985         vpunpckhqdq     $Hkey,$Hkey,$HK
986         vmovdqa         $Hkey,$Xi
987         vpxor           $Hkey,$HK,$HK
988         mov             \$4,%r10                # up to H^8
989         jmp             .Linit_start_avx
990 ___
991
992 sub clmul64x64_avx {
993 my ($Xhi,$Xi,$Hkey,$HK)=@_;
994
995 if (!defined($HK)) {    $HK = $T2;
996 $code.=<<___;
997         vpunpckhqdq     $Xi,$Xi,$T1
998         vpunpckhqdq     $Hkey,$Hkey,$T2
999         vpxor           $Xi,$T1,$T1             #
1000         vpxor           $Hkey,$T2,$T2
1001 ___
1002 } else {
1003 $code.=<<___;
1004         vpunpckhqdq     $Xi,$Xi,$T1
1005         vpxor           $Xi,$T1,$T1             #
1006 ___
1007 }
1008 $code.=<<___;
1009         vpclmulqdq      \$0x11,$Hkey,$Xi,$Xhi   #######
1010         vpclmulqdq      \$0x00,$Hkey,$Xi,$Xi    #######
1011         vpclmulqdq      \$0x00,$HK,$T1,$T1      #######
1012         vpxor           $Xi,$Xhi,$T2            #
1013         vpxor           $T2,$T1,$T1             #
1014
1015         vpslldq         \$8,$T1,$T2             #
1016         vpsrldq         \$8,$T1,$T1
1017         vpxor           $T2,$Xi,$Xi             #
1018         vpxor           $T1,$Xhi,$Xhi
1019 ___
1020 }
1021
1022 sub reduction_avx {
1023 my ($Xhi,$Xi) = @_;
1024
1025 $code.=<<___;
1026         vpsllq          \$57,$Xi,$T1            # 1st phase
1027         vpsllq          \$62,$Xi,$T2
1028         vpxor           $T1,$T2,$T2             #
1029         vpsllq          \$63,$Xi,$T1
1030         vpxor           $T1,$T2,$T2             #
1031         vpslldq         \$8,$T2,$T1             #
1032         vpsrldq         \$8,$T2,$T2
1033         vpxor           $T1,$Xi,$Xi             #
1034         vpxor           $T2,$Xhi,$Xhi
1035
1036         vpsrlq          \$1,$Xi,$T2             # 2nd phase
1037         vpxor           $Xi,$Xhi,$Xhi
1038         vpxor           $T2,$Xi,$Xi             #
1039         vpsrlq          \$5,$T2,$T2
1040         vpxor           $T2,$Xi,$Xi             #
1041         vpsrlq          \$1,$Xi,$Xi             #
1042         vpxor           $Xhi,$Xi,$Xi            #
1043 ___
1044 }
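# Same two-phase reduction as reduction_alg9 above, merely expressed
# with three-operand AVX instructions; the shift amounts (57/62/63
# and 1/5/1) are identical.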
1045
1046 $code.=<<___;
1047 .align  32
1048 .Linit_loop_avx:
1049         vpalignr        \$8,$T1,$T2,$T3         # low part is H.lo^H.hi...
1050         vmovdqu         $T3,-0x10($Htbl)        # save Karatsuba "salt"
1051 ___
1052         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^3,5,7
1053         &reduction_avx  ($Xhi,$Xi);
1054 $code.=<<___;
1055 .Linit_start_avx:
1056         vmovdqa         $Xi,$T3
1057 ___
1058         &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK);   # calculate H^2,4,6,8
1059         &reduction_avx  ($Xhi,$Xi);
1060 $code.=<<___;
1061         vpshufd         \$0b01001110,$T3,$T1
1062         vpshufd         \$0b01001110,$Xi,$T2
1063         vpxor           $T3,$T1,$T1             # Karatsuba pre-processing
1064         vmovdqu         $T3,0x00($Htbl)         # save H^1,3,5,7
1065         vpxor           $Xi,$T2,$T2             # Karatsuba pre-processing
1066         vmovdqu         $Xi,0x10($Htbl)         # save H^2,4,6,8
1067         lea             0x30($Htbl),$Htbl
1068         sub             \$1,%r10
1069         jnz             .Linit_loop_avx
1070
1071         vpalignr        \$8,$T2,$T1,$T3         # last "salt" is flipped
1072         vmovdqu         $T3,-0x10($Htbl)
1073
1074         vzeroupper
1075 ___
1076 $code.=<<___ if ($win64);
1077         movaps  (%rsp),%xmm6
1078         lea     0x18(%rsp),%rsp
1079 .LSEH_end_gcm_init_avx:
1080 ___
1081 $code.=<<___;
1082         ret
1083 .size   gcm_init_avx,.-gcm_init_avx
1084 ___
1085 } else {
1086 $code.=<<___;
1087         jmp     .L_init_clmul
1088 .size   gcm_init_avx,.-gcm_init_avx
1089 ___
1090 }
1091
1092 $code.=<<___;
1093 .globl  gcm_gmult_avx
1094 .type   gcm_gmult_avx,\@abi-omnipotent
1095 .align  32
1096 gcm_gmult_avx:
1097         jmp     .L_gmult_clmul
1098 .size   gcm_gmult_avx,.-gcm_gmult_avx
1099 ___
1100 \f
1101 $code.=<<___;
1102 .globl  gcm_ghash_avx
1103 .type   gcm_ghash_avx,\@abi-omnipotent
1104 .align  32
1105 gcm_ghash_avx:
1106 ___
1107 if ($avx) {
1108 my ($Xip,$Htbl,$inp,$len)=@_4args;
1109 my ($Xlo,$Xhi,$Xmi,
1110     $Zlo,$Zhi,$Zmi,
1111     $Hkey,$HK,$T1,$T2,
1112     $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));
1113
1114 $code.=<<___ if ($win64);
1115         lea     -0x88(%rsp),%rax
1116 .LSEH_begin_gcm_ghash_avx:
1117         # I can't trust assembler to use specific encoding:-(
1118         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1119         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
1120         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
1121         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
1122         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
1123         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
1124         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
1125         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
1126         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
1127         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
1128         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
1129 ___
1130 $code.=<<___;
1131         vzeroupper
1132
1133         vmovdqu         ($Xip),$Xi              # load $Xi
1134         lea             .L0x1c2_polynomial(%rip),%r10
1135         lea             0x40($Htbl),$Htbl       # size optimization
1136         vmovdqu         .Lbswap_mask(%rip),$bswap
1137         vpshufb         $bswap,$Xi,$Xi
1138         cmp             \$0x80,$len
1139         jb              .Lshort_avx
1140         sub             \$0x80,$len
1141
1142         vmovdqu         0x70($inp),$Ii          # I[7]
1143         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1144         vpshufb         $bswap,$Ii,$Ii
1145         vmovdqu         0x20-0x40($Htbl),$HK
1146
1147         vpunpckhqdq     $Ii,$Ii,$T2
1148          vmovdqu        0x60($inp),$Ij          # I[6]
1149         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1150         vpxor           $Ii,$T2,$T2
1151          vpshufb        $bswap,$Ij,$Ij
1152         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1153          vmovdqu        0x10-0x40($Htbl),$Hkey  # $Hkey^2
1154          vpunpckhqdq    $Ij,$Ij,$T1
1155          vmovdqu        0x50($inp),$Ii          # I[5]
1156         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1157          vpxor          $Ij,$T1,$T1
1158
1159          vpshufb        $bswap,$Ii,$Ii
1160         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1161          vpunpckhqdq    $Ii,$Ii,$T2
1162         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1163          vmovdqu        0x30-0x40($Htbl),$Hkey  # $Hkey^3
1164          vpxor          $Ii,$T2,$T2
1165          vmovdqu        0x40($inp),$Ij          # I[4]
1166         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1167          vmovdqu        0x50-0x40($Htbl),$HK
1168
1169          vpshufb        $bswap,$Ij,$Ij
1170         vpxor           $Xlo,$Zlo,$Zlo
1171         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1172         vpxor           $Xhi,$Zhi,$Zhi
1173          vpunpckhqdq    $Ij,$Ij,$T1
1174         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1175          vmovdqu        0x40-0x40($Htbl),$Hkey  # $Hkey^4
1176         vpxor           $Xmi,$Zmi,$Zmi
1177         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1178          vpxor          $Ij,$T1,$T1
1179
1180          vmovdqu        0x30($inp),$Ii          # I[3]
1181         vpxor           $Zlo,$Xlo,$Xlo
1182         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1183         vpxor           $Zhi,$Xhi,$Xhi
1184          vpshufb        $bswap,$Ii,$Ii
1185         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1186          vmovdqu        0x60-0x40($Htbl),$Hkey  # $Hkey^5
1187         vpxor           $Zmi,$Xmi,$Xmi
1188          vpunpckhqdq    $Ii,$Ii,$T2
1189         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1190          vmovdqu        0x80-0x40($Htbl),$HK
1191          vpxor          $Ii,$T2,$T2
1192
1193          vmovdqu        0x20($inp),$Ij          # I[2]
1194         vpxor           $Xlo,$Zlo,$Zlo
1195         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1196         vpxor           $Xhi,$Zhi,$Zhi
1197          vpshufb        $bswap,$Ij,$Ij
1198         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1199          vmovdqu        0x70-0x40($Htbl),$Hkey  # $Hkey^6
1200         vpxor           $Xmi,$Zmi,$Zmi
1201          vpunpckhqdq    $Ij,$Ij,$T1
1202         vpclmulqdq      \$0x00,$HK,$T2,$Xmi
1203          vpxor          $Ij,$T1,$T1
1204
1205          vmovdqu        0x10($inp),$Ii          # I[1]
1206         vpxor           $Zlo,$Xlo,$Xlo
1207         vpclmulqdq      \$0x00,$Hkey,$Ij,$Zlo
1208         vpxor           $Zhi,$Xhi,$Xhi
1209          vpshufb        $bswap,$Ii,$Ii
1210         vpclmulqdq      \$0x11,$Hkey,$Ij,$Zhi
1211          vmovdqu        0x90-0x40($Htbl),$Hkey  # $Hkey^7
1212         vpxor           $Zmi,$Xmi,$Xmi
1213          vpunpckhqdq    $Ii,$Ii,$T2
1214         vpclmulqdq      \$0x10,$HK,$T1,$Zmi
1215          vmovdqu        0xb0-0x40($Htbl),$HK
1216          vpxor          $Ii,$T2,$T2
1217
1218          vmovdqu        ($inp),$Ij              # I[0]
1219         vpxor           $Xlo,$Zlo,$Zlo
1220         vpclmulqdq      \$0x00,$Hkey,$Ii,$Xlo
1221         vpxor           $Xhi,$Zhi,$Zhi
1222          vpshufb        $bswap,$Ij,$Ij
1223         vpclmulqdq      \$0x11,$Hkey,$Ii,$Xhi
1224          vmovdqu        0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1225         vpxor           $Xmi,$Zmi,$Zmi
1226         vpclmulqdq      \$0x10,$HK,$T2,$Xmi
1227
1228         lea             0x80($inp),$inp
1229         cmp             \$0x80,$len
1230         jb              .Ltail_avx
1231
1232         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1233         sub             \$0x80,$len
1234         jmp             .Loop8x_avx
1235
1236 .align  32
1237 .Loop8x_avx:
1238         vpunpckhqdq     $Ij,$Ij,$T1
1239          vmovdqu        0x70($inp),$Ii          # I[7]
1240         vpxor           $Xlo,$Zlo,$Zlo
1241         vpxor           $Ij,$T1,$T1
1242         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xi
1243          vpshufb        $bswap,$Ii,$Ii
1244         vpxor           $Xhi,$Zhi,$Zhi
1245         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xo
1246          vmovdqu        0x00-0x40($Htbl),$Hkey  # $Hkey^1
1247          vpunpckhqdq    $Ii,$Ii,$T2
1248         vpxor           $Xmi,$Zmi,$Zmi
1249         vpclmulqdq      \$0x00,$HK,$T1,$Tred
1250          vmovdqu        0x20-0x40($Htbl),$HK
1251          vpxor          $Ii,$T2,$T2
1252
1253           vmovdqu       0x60($inp),$Ij          # I[6]
1254          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1255         vpxor           $Zlo,$Xi,$Xi            # collect result
1256           vpshufb       $bswap,$Ij,$Ij
1257          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1258         vxorps          $Zhi,$Xo,$Xo
1259           vmovdqu       0x10-0x40($Htbl),$Hkey  # $Hkey^2
1260          vpunpckhqdq    $Ij,$Ij,$T1
1261          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1262         vpxor           $Zmi,$Tred,$Tred
1263          vxorps         $Ij,$T1,$T1
1264
1265           vmovdqu       0x50($inp),$Ii          # I[5]
1266         vpxor           $Xi,$Tred,$Tred         # aggregated Karatsuba post-processing
1267          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1268         vpxor           $Xo,$Tred,$Tred
1269         vpslldq         \$8,$Tred,$T2
1270          vpxor          $Xlo,$Zlo,$Zlo
1271          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1272         vpsrldq         \$8,$Tred,$Tred
1273         vpxor           $T2, $Xi, $Xi
1274           vmovdqu       0x30-0x40($Htbl),$Hkey  # $Hkey^3
1275           vpshufb       $bswap,$Ii,$Ii
1276         vxorps          $Tred,$Xo, $Xo
1277          vpxor          $Xhi,$Zhi,$Zhi
1278          vpunpckhqdq    $Ii,$Ii,$T2
1279          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1280           vmovdqu       0x50-0x40($Htbl),$HK
1281          vpxor          $Ii,$T2,$T2
1282          vpxor          $Xmi,$Zmi,$Zmi
1283
1284           vmovdqu       0x40($inp),$Ij          # I[4]
1285         vpalignr        \$8,$Xi,$Xi,$Tred       # 1st phase
1286          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1287           vpshufb       $bswap,$Ij,$Ij
1288          vpxor          $Zlo,$Xlo,$Xlo
1289          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1290           vmovdqu       0x40-0x40($Htbl),$Hkey  # $Hkey^4
1291          vpunpckhqdq    $Ij,$Ij,$T1
1292          vpxor          $Zhi,$Xhi,$Xhi
1293          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1294          vxorps         $Ij,$T1,$T1
1295          vpxor          $Zmi,$Xmi,$Xmi
1296
1297           vmovdqu       0x30($inp),$Ii          # I[3]
1298         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1299          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1300           vpshufb       $bswap,$Ii,$Ii
1301          vpxor          $Xlo,$Zlo,$Zlo
1302          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1303           vmovdqu       0x60-0x40($Htbl),$Hkey  # $Hkey^5
1304          vpunpckhqdq    $Ii,$Ii,$T2
1305          vpxor          $Xhi,$Zhi,$Zhi
1306          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1307           vmovdqu       0x80-0x40($Htbl),$HK
1308          vpxor          $Ii,$T2,$T2
1309          vpxor          $Xmi,$Zmi,$Zmi
1310
1311           vmovdqu       0x20($inp),$Ij          # I[2]
1312          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1313           vpshufb       $bswap,$Ij,$Ij
1314          vpxor          $Zlo,$Xlo,$Xlo
1315          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1316           vmovdqu       0x70-0x40($Htbl),$Hkey  # $Hkey^6
1317          vpunpckhqdq    $Ij,$Ij,$T1
1318          vpxor          $Zhi,$Xhi,$Xhi
1319          vpclmulqdq     \$0x00,$HK,  $T2,$Xmi
1320          vpxor          $Ij,$T1,$T1
1321          vpxor          $Zmi,$Xmi,$Xmi
1322         vxorps          $Tred,$Xi,$Xi
1323
1324           vmovdqu       0x10($inp),$Ii          # I[1]
1325         vpalignr        \$8,$Xi,$Xi,$Tred       # 2nd phase
1326          vpclmulqdq     \$0x00,$Hkey,$Ij,$Zlo
1327           vpshufb       $bswap,$Ii,$Ii
1328          vpxor          $Xlo,$Zlo,$Zlo
1329          vpclmulqdq     \$0x11,$Hkey,$Ij,$Zhi
1330           vmovdqu       0x90-0x40($Htbl),$Hkey  # $Hkey^7
1331         vpclmulqdq      \$0x10,(%r10),$Xi,$Xi
1332         vxorps          $Xo,$Tred,$Tred
1333          vpunpckhqdq    $Ii,$Ii,$T2
1334          vpxor          $Xhi,$Zhi,$Zhi
1335          vpclmulqdq     \$0x10,$HK,  $T1,$Zmi
1336           vmovdqu       0xb0-0x40($Htbl),$HK
1337          vpxor          $Ii,$T2,$T2
1338          vpxor          $Xmi,$Zmi,$Zmi
1339
1340           vmovdqu       ($inp),$Ij              # I[0]
1341          vpclmulqdq     \$0x00,$Hkey,$Ii,$Xlo
1342           vpshufb       $bswap,$Ij,$Ij
1343          vpclmulqdq     \$0x11,$Hkey,$Ii,$Xhi
1344           vmovdqu       0xa0-0x40($Htbl),$Hkey  # $Hkey^8
1345         vpxor           $Tred,$Ij,$Ij
1346          vpclmulqdq     \$0x10,$HK,  $T2,$Xmi
1347         vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
1348
1349         lea             0x80($inp),$inp
1350         sub             \$0x80,$len
1351         jnc             .Loop8x_avx
1352
1353         add             \$0x80,$len
1354         jmp             .Ltail_no_xor_avx
1355
1356 .align  32
1357 .Lshort_avx:
1358         vmovdqu         -0x10($inp,$len),$Ii    # very last word
1359         lea             ($inp,$len),$inp
1360         vmovdqu         0x00-0x40($Htbl),$Hkey  # $Hkey^1
1361         vmovdqu         0x20-0x40($Htbl),$HK
1362         vpshufb         $bswap,$Ii,$Ij
1363
1364         vmovdqa         $Xlo,$Zlo               # subtle way to zero $Zlo,
1365         vmovdqa         $Xhi,$Zhi               # $Zhi and
1366         vmovdqa         $Xmi,$Zmi               # $Zmi
1367         sub             \$0x10,$len
1368         jz              .Ltail_avx
1369
1370         vpunpckhqdq     $Ij,$Ij,$T1
1371         vpxor           $Xlo,$Zlo,$Zlo
1372         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1373         vpxor           $Ij,$T1,$T1
1374          vmovdqu        -0x20($inp),$Ii
1375         vpxor           $Xhi,$Zhi,$Zhi
1376         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1377         vmovdqu         0x10-0x40($Htbl),$Hkey  # $Hkey^2
1378          vpshufb        $bswap,$Ii,$Ij
1379         vpxor           $Xmi,$Zmi,$Zmi
1380         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1381         vpsrldq         \$8,$HK,$HK
1382         sub             \$0x10,$len
1383         jz              .Ltail_avx
1384
1385         vpunpckhqdq     $Ij,$Ij,$T1
1386         vpxor           $Xlo,$Zlo,$Zlo
1387         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1388         vpxor           $Ij,$T1,$T1
1389          vmovdqu        -0x30($inp),$Ii
1390         vpxor           $Xhi,$Zhi,$Zhi
1391         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1392         vmovdqu         0x30-0x40($Htbl),$Hkey  # $Hkey^3
1393          vpshufb        $bswap,$Ii,$Ij
1394         vpxor           $Xmi,$Zmi,$Zmi
1395         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1396         vmovdqu         0x50-0x40($Htbl),$HK
1397         sub             \$0x10,$len
1398         jz              .Ltail_avx
1399
1400         vpunpckhqdq     $Ij,$Ij,$T1
1401         vpxor           $Xlo,$Zlo,$Zlo
1402         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1403         vpxor           $Ij,$T1,$T1
1404          vmovdqu        -0x40($inp),$Ii
1405         vpxor           $Xhi,$Zhi,$Zhi
1406         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1407         vmovdqu         0x40-0x40($Htbl),$Hkey  # $Hkey^4
1408          vpshufb        $bswap,$Ii,$Ij
1409         vpxor           $Xmi,$Zmi,$Zmi
1410         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1411         vpsrldq         \$8,$HK,$HK
1412         sub             \$0x10,$len
1413         jz              .Ltail_avx
1414
1415         vpunpckhqdq     $Ij,$Ij,$T1
1416         vpxor           $Xlo,$Zlo,$Zlo
1417         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1418         vpxor           $Ij,$T1,$T1
1419          vmovdqu        -0x50($inp),$Ii
1420         vpxor           $Xhi,$Zhi,$Zhi
1421         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1422         vmovdqu         0x60-0x40($Htbl),$Hkey  # $Hkey^5
1423          vpshufb        $bswap,$Ii,$Ij
1424         vpxor           $Xmi,$Zmi,$Zmi
1425         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1426         vmovdqu         0x80-0x40($Htbl),$HK
1427         sub             \$0x10,$len
1428         jz              .Ltail_avx
1429
1430         vpunpckhqdq     $Ij,$Ij,$T1
1431         vpxor           $Xlo,$Zlo,$Zlo
1432         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1433         vpxor           $Ij,$T1,$T1
1434          vmovdqu        -0x60($inp),$Ii
1435         vpxor           $Xhi,$Zhi,$Zhi
1436         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1437         vmovdqu         0x70-0x40($Htbl),$Hkey  # $Hkey^6
1438          vpshufb        $bswap,$Ii,$Ij
1439         vpxor           $Xmi,$Zmi,$Zmi
1440         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1441         vpsrldq         \$8,$HK,$HK
1442         sub             \$0x10,$len
1443         jz              .Ltail_avx
1444
1445         vpunpckhqdq     $Ij,$Ij,$T1
1446         vpxor           $Xlo,$Zlo,$Zlo
1447         vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
1448         vpxor           $Ij,$T1,$T1
1449          vmovdqu        -0x70($inp),$Ii
1450         vpxor           $Xhi,$Zhi,$Zhi
1451         vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
1452         vmovdqu         0x90-0x40($Htbl),$Hkey  # $Hkey^7
1453          vpshufb        $bswap,$Ii,$Ij
1454         vpxor           $Xmi,$Zmi,$Zmi
1455         vpclmulqdq      \$0x00,$HK,$T1,$Xmi
1456         vmovq           0xb8-0x40($Htbl),$HK
1457         sub             \$0x10,$len
1458         jmp             .Ltail_avx
1459
.align  32
.Ltail_avx:
        vpxor           $Xi,$Ij,$Ij             # accumulate $Xi
.Ltail_no_xor_avx:
        vpunpckhqdq     $Ij,$Ij,$T1
        vpxor           $Xlo,$Zlo,$Zlo
        vpclmulqdq      \$0x00,$Hkey,$Ij,$Xlo
        vpxor           $Ij,$T1,$T1
        vpxor           $Xhi,$Zhi,$Zhi
        vpclmulqdq      \$0x11,$Hkey,$Ij,$Xhi
        vpxor           $Xmi,$Zmi,$Zmi
        vpclmulqdq      \$0x00,$HK,$T1,$Xmi

        vmovdqu         (%r10),$Tred

        vpxor           $Xlo,$Zlo,$Xi
        vpxor           $Xhi,$Zhi,$Xo
        vpxor           $Xmi,$Zmi,$Zmi

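# $Zmi holds the sum of the middle Karatsuba terms; fold in the low and
# high sums, then split it across the two 128-bit halves so $Xi:$Xo
# carries the full 256-bit product.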
        vpxor           $Xi, $Zmi,$Zmi          # aggregated Karatsuba post-processing
        vpxor           $Xo, $Zmi,$Zmi
        vpslldq         \$8, $Zmi,$T2
        vpsrldq         \$8, $Zmi,$Zmi
        vpxor           $T2, $Xi, $Xi
        vpxor           $Zmi,$Xo, $Xo

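# Two-phase reduction: each vpclmulqdq folds the low qword of $Xi
# against the reduction constant in $Tred, and vpalignr swaps the qword
# halves, bringing the result back to a 128-bit residue in $Xi.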
        vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 1st phase
        vpalignr        \$8,$Xi,$Xi,$Xi
        vpxor           $T2,$Xi,$Xi

        vpclmulqdq      \$0x10,$Tred,$Xi,$T2    # 2nd phase
        vpalignr        \$8,$Xi,$Xi,$Xi
        vpxor           $Xo,$Xi,$Xi
        vpxor           $T2,$Xi,$Xi

        cmp             \$0,$len
        jne             .Lshort_avx

        vpshufb         $bswap,$Xi,$Xi
        vmovdqu         $Xi,($Xip)
        vzeroupper
___
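# xmm6-xmm15 are non-volatile on Win64; the prologue spilled them to a
# 0xa8-byte scratch area on the stack, which is restored and released here.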
$code.=<<___ if ($win64);
        movaps  (%rsp),%xmm6
        movaps  0x10(%rsp),%xmm7
        movaps  0x20(%rsp),%xmm8
        movaps  0x30(%rsp),%xmm9
        movaps  0x40(%rsp),%xmm10
        movaps  0x50(%rsp),%xmm11
        movaps  0x60(%rsp),%xmm12
        movaps  0x70(%rsp),%xmm13
        movaps  0x80(%rsp),%xmm14
        movaps  0x90(%rsp),%xmm15
        lea     0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_avx:
___
$code.=<<___;
        ret
.size   gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
$code.=<<___;
        jmp     .L_ghash_clmul
.size   gcm_ghash_avx,.-gcm_ghash_avx
___
}

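# Constant data shared by the code paths above: .Lbswap_mask feeds
# pshufb/vpshufb to byte-reverse 128-bit blocks, .L0x1c2_polynomial is
# the reduction constant used by the carry-less multiplication paths,
# and .Lrem_4bit/.Lrem_8bit are the reduction lookup tables for the
# table-driven 4-bit routines.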
$code.=<<___;
.align  64
.Lbswap_mask:
        .byte   15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
        .byte   1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
        .long   7,0,7,0
.L7_mask_poly:
        .long   7,0,`0xE1<<1`,0
.align  64
.type   .Lrem_4bit,\@object
.Lrem_4bit:
        .long   0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
        .long   0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
        .long   0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
        .long   0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
.type   .Lrem_8bit,\@object
.Lrem_8bit:
        .value  0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
        .value  0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
        .value  0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
        .value  0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
        .value  0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
        .value  0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
        .value  0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
        .value  0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
        .value  0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
        .value  0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
        .value  0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
        .value  0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
        .value  0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
        .value  0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
        .value  0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
        .value  0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
        .value  0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
        .value  0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
        .value  0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
        .value  0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
        .value  0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
        .value  0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
        .value  0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
        .value  0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
        .value  0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
        .value  0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
        .value  0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
        .value  0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
        .value  0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
        .value  0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
        .value  0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
        .value  0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE

.asciz  "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align  64
___
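# The 16-bit constants in .Lrem_4bit (stored shifted into the high dword)
# and in .Lrem_8bit are the carry-less products i*0x1C20 and i*0x01C2 over
# GF(2). The helper below is only a sketch for regenerating/cross-checking
# the tables offline; nothing in this script calls it.
sub _clmul16 {                          # carry-less multiply, low 16 bits
        my ($x,$y)=@_;
        my $r=0;
        for (my $i=0;$i<16;$i++) {
                $r ^= $y<<$i if (($x>>$i)&1);   # XOR-accumulate shifted $y
        }
        return $r&0xffff;
}
# e.g. map { sprintf "0x%04X",_clmul16($_,0x01C2) } (0..7) reproduces the
# first .Lrem_8bit row: 0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E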

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

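# se_handler() backs the 4-bit routines: when RIP lies between the recorded
# prologue and epilogue labels it recovers %rbx, %rbp, %r12 and the caller's
# stack pointer from the frame, then hands the copied context to
# RtlVirtualUnwind and returns ExceptionContinueSearch.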
$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type   se_handler,\@abi-omnipotent
.align  16
se_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip

        mov     8($disp),%rsi           # disp->ImageBase
        mov     56($disp),%r11          # disp->HandlerData

        mov     0(%r11),%r10d           # HandlerData[0]
        lea     (%rsi,%r10),%r10        # prologue label
        cmp     %r10,%rbx               # context->Rip<prologue label
        jb      .Lin_prologue

        mov     152($context),%rax      # pull context->Rsp

        mov     4(%r11),%r10d           # HandlerData[1]
        lea     (%rsi,%r10),%r10        # epilogue label
        cmp     %r10,%rbx               # context->Rip>=epilogue label
        jae     .Lin_prologue

        lea     24(%rax),%rax           # adjust "rsp"

        mov     -8(%rax),%rbx
        mov     -16(%rax),%rbp
        mov     -24(%rax),%r12
        mov     %rbx,144($context)      # restore context->Rbx
        mov     %rbp,160($context)      # restore context->Rbp
        mov     %r12,216($context)      # restore context->R12

.Lin_prologue:
        mov     8(%rax),%rdi
        mov     16(%rax),%rsi
        mov     %rax,152($context)      # restore context->Rsp
        mov     %rsi,168($context)      # restore context->Rsi
        mov     %rdi,176($context)      # restore context->Rdi

        mov     40($disp),%rdi          # disp->ContextRecord
        mov     $context,%rsi           # context
        mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
        .long   0xa548f3fc              # cld; rep movsq

        mov     $disp,%rsi
        xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
        mov     8(%rsi),%rdx            # arg2, disp->ImageBase
        mov     0(%rsi),%r8             # arg3, disp->ControlPc
        mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
        mov     40(%rsi),%r10           # disp->ContextRecord
        lea     56(%rsi),%r11           # &disp->HandlerData
        lea     24(%rsi),%r12           # &disp->EstablisherFrame
        mov     %r10,32(%rsp)           # arg5
        mov     %r11,40(%rsp)           # arg6
        mov     %r12,48(%rsp)           # arg7
        mov     %rcx,56(%rsp)           # arg8, (NULL)
        call    *__imp_RtlVirtualUnwind(%rip)

        mov     \$1,%eax                # ExceptionContinueSearch
        add     \$64,%rsp
        popfq
        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        pop     %rbp
        pop     %rbx
        pop     %rdi
        pop     %rsi
        ret
.size   se_handler,.-se_handler

.section        .pdata
.align  4
        .rva    .LSEH_begin_gcm_gmult_4bit
        .rva    .LSEH_end_gcm_gmult_4bit
        .rva    .LSEH_info_gcm_gmult_4bit

        .rva    .LSEH_begin_gcm_ghash_4bit
        .rva    .LSEH_end_gcm_ghash_4bit
        .rva    .LSEH_info_gcm_ghash_4bit

        .rva    .LSEH_begin_gcm_init_clmul
        .rva    .LSEH_end_gcm_init_clmul
        .rva    .LSEH_info_gcm_init_clmul

        .rva    .LSEH_begin_gcm_ghash_clmul
        .rva    .LSEH_end_gcm_ghash_clmul
        .rva    .LSEH_info_gcm_ghash_clmul
___
$code.=<<___    if ($avx);
        .rva    .LSEH_begin_gcm_init_avx
        .rva    .LSEH_end_gcm_init_avx
        .rva    .LSEH_info_gcm_init_clmul

        .rva    .LSEH_begin_gcm_ghash_avx
        .rva    .LSEH_end_gcm_ghash_avx
        .rva    .LSEH_info_gcm_ghash_clmul
___
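# gcm_init_avx and gcm_ghash_avx point their .pdata entries at the clmul
# .xdata records below, as their prologues lay out the same stack frame
# and xmm spill area.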
$code.=<<___;
.section        .xdata
.align  8
.LSEH_info_gcm_gmult_4bit:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lgmult_prologue,.Lgmult_epilogue       # HandlerData
.LSEH_info_gcm_ghash_4bit:
        .byte   9,0,0,0
        .rva    se_handler
        .rva    .Lghash_prologue,.Lghash_epilogue       # HandlerData
.LSEH_info_gcm_init_clmul:
        .byte   0x01,0x08,0x03,0x00
        .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
        .byte   0x04,0x22,0x00,0x00     #sub    rsp,0x18
.LSEH_info_gcm_ghash_clmul:
        .byte   0x01,0x33,0x16,0x00
        .byte   0x33,0xf8,0x09,0x00     #movaps 0x90(rsp),xmm15
        .byte   0x2e,0xe8,0x08,0x00     #movaps 0x80(rsp),xmm14
        .byte   0x29,0xd8,0x07,0x00     #movaps 0x70(rsp),xmm13
        .byte   0x24,0xc8,0x06,0x00     #movaps 0x60(rsp),xmm12
        .byte   0x1f,0xb8,0x05,0x00     #movaps 0x50(rsp),xmm11
        .byte   0x1a,0xa8,0x04,0x00     #movaps 0x40(rsp),xmm10
        .byte   0x15,0x98,0x03,0x00     #movaps 0x30(rsp),xmm9
        .byte   0x10,0x88,0x02,0x00     #movaps 0x20(rsp),xmm8
        .byte   0x0c,0x78,0x01,0x00     #movaps 0x10(rsp),xmm7
        .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
        .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
___
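# The 4-bit entries above register se_handler as an exception handler
# (leading .byte 9 = version 1 + UNW_FLAG_EHANDLER), while the clmul/avx
# entries are plain version-1 UNWIND_INFO records whose unwind codes
# mirror the movaps spills and stack adjustment in the prologues.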
}

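# Evaluate the backtick-quoted expressions (e.g. `0x1C20<<16`) embedded in
# the text above, then emit the finished assembly on stdout.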
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;