ghash-x86_64.pl: fix length handling bug.
[openssl.git] / crypto / modes / asm / ghash-x86_64.pl
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # March, June 2010
11 #
12 # The module implements "4-bit" GCM GHASH function and underlying
13 # single multiplication operation in GF(2^128). "4-bit" means that
14 # it uses 256 bytes per-key table [+128 bytes shared table]. GHASH
15 # function features so called "528B" variant utilizing additional
16 # 256+16 bytes of per-key storage [+512 bytes shared table].
17 # Performance results are for this streamed GHASH subroutine and are
18 # expressed in cycles per processed byte, less is better:
19 #
20 #               gcc 3.4.x(*)    assembler
21 #
22 # P4            28.6            14.0            +100%
23 # Opteron       19.3            7.7             +150%
24 # Core2         17.8            8.1(**)         +120%
25 # Atom          31.6            16.8            +88%
26 # VIA Nano      21.8            10.1            +115%
27 #
28 # (*)   comparison is not completely fair, because C results are
29 #       for vanilla "256B" implementation, while assembler results
30 #       are for "528B";-)
31 # (**)  it's a mystery [to me] why the Core2 result is not same as for
32 #       Opteron;
33
34 # May 2010
35 #
36 # Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
37 # See ghash-x86.pl for background information and details about coding
38 # techniques.
39 #
40 # Special thanks to David Woodhouse <dwmw2@infradead.org> for
41 # providing access to a Westmere-based system on behalf of Intel
42 # Open Source Technology Centre.
43
44 # December 2012
45 #
46 # Overhaul: aggregate Karatsuba post-processing, improve ILP in
47 # reduction_alg9, increase reduction aggregate factor to 4x. As for
48 # the latter, ghash-x86.pl discusses that it makes lesser sense to
49 # increase aggregate factor. Then why increase here? Critical path
50 # consists of 3 independent pclmulqdq instructions, Karatsuba post-
51 # processing and reduction. "On top" of this we lay down aggregated
52 # multiplication operations, triplets of independent pclmulqdq's. As
53 # issue rate for pclmulqdq is limited, it makes lesser sense to
54 # aggregate more multiplications than it takes to perform remaining
55 # non-multiplication operations. 2x is near-optimal coefficient for
56 # contemporary Intel CPUs (therefore modest improvement coefficient),
57 # but not for Bulldozer. Latter is because logical SIMD operations
58 # are twice as slow in comparison to Intel, so that critical path is
59 # longer. A CPU with higher pclmulqdq issue rate would also benefit
60 # from higher aggregate factor...
61 #
62 # Westmere      1.76(+14%)
63 # Sandy Bridge  1.79(+9%)
64 # Ivy Bridge    1.79(+8%)
65 # Bulldozer     1.52(+25%)
66
# Command line: [flavour] output-file.  "flavour" selects the perlasm
# dialect (elf, macosx, mingw64, nasm, ...); a lone argument that looks
# like a filename is treated as the output.
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

# Win64 calling convention / assembler dialect is implied by the flavour
# or by an .asm output suffix.
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

# Locate the x86_64-xlate.pl translator next to this script or in the
# sibling perlasm directory; all generated code is piped through it.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Route everything printed to STDOUT through the translator.
open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

# Emit the 4x-aggregated PCLMULQDQ code paths (H^3/H^4 tables and the
# .Lmod4_loop) in addition to the 2x path.
$do4xaggr=1;
82
# common register layout (shared by the "4-bit" scalar code paths)
$nlo="%rax";			# low-nibble table index
$nhi="%rbx";			# high-nibble table index
$Zlo="%r8";			# accumulator Z, low 64 bits
$Zhi="%r9";			# accumulator Z, high 64 bits
$tmp="%r10";
$rem_4bit = "%r11";		# pointer to .Lrem_4bit reduction table

$Xi="%rdi";			# 1st arg: 16-byte hash value Xi
$Htbl="%rsi";			# 2nd arg: per-key table

# per-function register layout
$cnt="%rcx";			# byte countdown in loop()
$rem="%rdx";			# pending reduction nibble
# LB($reg) - return the name of the 8-bit ("low byte") alias of a 64/32-bit
# general-purpose register, e.g. %rax/%eax -> %al, %rsi -> %sil,
# %rbp -> %bpl, %r10/%r10d -> %r10b.  The argument is returned unchanged if
# no pattern matches.
# Fix: use ${1} instead of the deprecated \1 in s/// replacement text
# ("\1 better written as $1" under warnings); behavior is identical.
# Call sites invoke &LB(...), which bypasses the empty prototype.
sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%${1}l/	or
			$r =~ s/%[er]([sd]i)/%${1}l/	or
			$r =~ s/%[er](bp)/%${1}l/	or
			$r =~ s/%(r[0-9]+)[d]?/%${1}b/;	$r; }
102
sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
# Any call to an undefined sub (e.g. &mov($dst,$src)) lands here: the sub's
# name becomes the instruction mnemonic, appended to the global $code.
# Perl-side arguments are "Intel style" (destination first); the last Perl
# argument is emitted first and the rest in reverse, producing AT&T
# src,dst operand order (see e.g. &mov("%rdx","8($inp)") -> "mov 8(...),%rdx").
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);	# numeric-looking arg -> "$imm"
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
109 \f
{ my $N;
  # loop($inp) - emit one instantiation of the "4-bit" (256-byte table)
  # GHASH inner loop: multiplies the accumulator $Zhi:$Zlo (seeded by the
  # caller with byte 15 of Xi) by H, walking bytes 14..0 of $inp and doing
  # two table lookups (low/high nibble) per byte, with .Lrem_4bit-based
  # reduction.  $N numbers the labels so the sub can be expanded more than
  # once in the same output file.  Result is left byte-swapped in $Zhi:$Zlo.
  sub loop() {
  my $inp = shift;

	$N++;
$code.=<<___;
	xor	$nlo,$nlo
	xor	$nhi,$nhi
	mov	`&LB("$Zlo")`,`&LB("$nlo")`
	mov	`&LB("$Zlo")`,`&LB("$nhi")`
	shl	\$4,`&LB("$nlo")`
	mov	\$14,$cnt
	mov	8($Htbl,$nlo),$Zlo
	mov	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	mov	$Zlo,$rem
	jmp	.Loop$N

.align	16
.Loop$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	mov	($inp,$cnt),`&LB("$nlo")`
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	mov	`&LB("$nlo")`,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	shl	\$4,`&LB("$nlo")`
	xor	$tmp,$Zlo
	dec	$cnt
	js	.Lbreak$N

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo
	jmp	.Loop$N

.align	16
.Lbreak$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	xor	$tmp,$Zlo
	xor	($rem_4bit,$rem,8),$Zhi

	bswap	$Zlo
	bswap	$Zhi
___
}}
187
# gcm_gmult_4bit(u64 Xi[2], const u128 Htbl[16]) - single GF(2^128)
# multiplication of Xi by H using the 256-byte per-key table; Xi is
# updated in place.
$code=<<___;
.text

.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,\@function,2
.align	16
gcm_gmult_4bit:
	push	%rbx
	push	%rbp		# %rbp and %r12 are pushed exclusively in
	push	%r12		# order to reuse Win64 exception handler...
.Lgmult_prologue:

	movzb	15($Xi),$Zlo
	lea	.Lrem_4bit(%rip),$rem_4bit
___
	&loop	($Xi);		# emit the inner loop reading from Xi itself
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	mov	16(%rsp),%rbx
	lea	24(%rsp),%rsp
.Lgmult_epilogue:
	ret
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___
214 \f
# per-function register layout (gcm_ghash_4bit arguments 3 and 4)
$inp="%rdx";
$len="%rcx";
$rem_8bit=$rem_4bit;	# the 8-bit reduction table pointer reuses %r11
219
# gcm_ghash_4bit(u64 Xi[2], const u128 Htbl[16], const u8 *inp, size_t len) -
# streamed GHASH: folds len bytes (multiple of 16) at inp into Xi.  This is
# the "528B" variant: on entry it spills a right-shifted-by-4 copy of the
# key table (at $Hshr4) plus a 16-byte nibble scratch map onto the 280-byte
# stack frame, then uses the 8-bit .Lrem_8bit reduction table.
$code.=<<___;
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,\@function,4
.align	16
gcm_ghash_4bit:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	sub	\$280,%rsp
.Lghash_prologue:
	mov	$inp,%r14		# reassign couple of args
	mov	$len,%r15
___
{ my $inp="%r14";		# %rdx/%rcx are needed as scratch below
  my $dat="%edx";
  my $len="%r15";
  my @nhi=("%ebx","%ecx");
  my @rem=("%r12","%r13");
  my $Hshr4="%rbp";

	&sub	($Htbl,-128);		# size optimization
	&lea	($Hshr4,"16+128(%rsp)");
	{ my @lo =($nlo,$nhi);
	  my @hi =($Zlo,$Zhi);

	  # Software-pipelined table preparation: pass $i loads Htbl[$i],
	  # pass $i+1 shifts it right by 4 bits, pass $i+2 ($j=$i-2) stores
	  # it to the $Hshr4 area and a nibble byte to the (%rsp) map; @lo/@hi
	  # register pairs are rotated each iteration to overlap the stages.
	  &xor	($dat,$dat);
	  for ($i=0,$j=-2;$i<18;$i++,$j++) {
	    &mov	("$j(%rsp)",&LB($dat))		if ($i>1);
	    &or		($lo[0],$tmp)			if ($i>1);
	    &mov	(&LB($dat),&LB($lo[1]))		if ($i>0 && $i<17);
	    &shr	($lo[1],4)			if ($i>0 && $i<17);
	    &mov	($tmp,$hi[1])			if ($i>0 && $i<17);
	    &shr	($hi[1],4)			if ($i>0 && $i<17);
	    &mov	("8*$j($Hshr4)",$hi[0])		if ($i>1);
	    &mov	($hi[0],"16*$i+0-128($Htbl)")	if ($i<16);
	    &shl	(&LB($dat),4)			if ($i>0 && $i<17);
	    &mov	("8*$j-128($Hshr4)",$lo[0])	if ($i>1);
	    &mov	($lo[0],"16*$i+8-128($Htbl)")	if ($i<16);
	    &shl	($tmp,60)			if ($i>0 && $i<17);

	    push	(@lo,shift(@lo));
	    push	(@hi,shift(@hi));
	  }
	}
	&add	($Htbl,-128);
	&mov	($Zlo,"8($Xi)");
	&mov	($Zhi,"0($Xi)");
	&add	($len,$inp);		# pointer to the end of data
	&lea	($rem_8bit,".Lrem_8bit(%rip)");
	&jmp	(".Louter_loop");

$code.=".align	16\n.Louter_loop:\n";
	&xor	($Zhi,"($inp)");
	&mov	("%rdx","8($inp)");
	&lea	($inp,"16($inp)");
	&xor	("%rdx",$Zlo);
	&mov	("($Xi)",$Zhi);
	&mov	("8($Xi)","%rdx");
	&shr	("%rdx",32);

	&xor	($nlo,$nlo);
	&rol	($dat,8);
	&mov	(&LB($nlo),&LB($dat));
	&movz	($nhi[0],&LB($dat));
	&shl	(&LB($nlo),4);
	&shr	($nhi[0],4);

	# Fully unrolled (at Perl level) processing of 15 of the 16 bytes of
	# the current block; @nhi/@rem register pairs are "rotated" so two
	# consecutive iterations can overlap.
	for ($j=11,$i=0;$i<15;$i++) {
	    &rol	($dat,8);
	    &xor	($Zlo,"8($Htbl,$nlo)")			if ($i>0);
	    &xor	($Zhi,"($Htbl,$nlo)")			if ($i>0);
	    &mov	($Zlo,"8($Htbl,$nlo)")			if ($i==0);
	    &mov	($Zhi,"($Htbl,$nlo)")			if ($i==0);

	    &mov	(&LB($nlo),&LB($dat));
	    &xor	($Zlo,$tmp)				if ($i>0);
	    &movzw	($rem[1],"($rem_8bit,$rem[1],2)")	if ($i>0);

	    &movz	($nhi[1],&LB($dat));
	    &shl	(&LB($nlo),4);
	    &movzb	($rem[0],"(%rsp,$nhi[0])");

	    &shr	($nhi[1],4)				if ($i<14);
	    &and	($nhi[1],0xf0)				if ($i==14);
	    &shl	($rem[1],48)				if ($i>0);
	    &xor	($rem[0],$Zlo);

	    &mov	($tmp,$Zhi);
	    &xor	($Zhi,$rem[1])				if ($i>0);
	    &shr	($Zlo,8);

	    &movz	($rem[0],&LB($rem[0]));
	    &mov	($dat,"$j($Xi)")			if (--$j%4==0);
	    &shr	($Zhi,8);

	    &xor	($Zlo,"-128($Hshr4,$nhi[0],8)");
	    &shl	($tmp,56);
	    &xor	($Zhi,"($Hshr4,$nhi[0],8)");

	    unshift	(@nhi,pop(@nhi));		# "rotate" registers
	    unshift	(@rem,pop(@rem));
	}
	# Epilogue of the unrolled loop: final (16th) byte and reduction.
	&movzw	($rem[1],"($rem_8bit,$rem[1],2)");
	&xor	($Zlo,"8($Htbl,$nlo)");
	&xor	($Zhi,"($Htbl,$nlo)");

	&shl	($rem[1],48);
	&xor	($Zlo,$tmp);

	&xor	($Zhi,$rem[1]);
	&movz	($rem[0],&LB($Zlo));
	&shr	($Zlo,4);

	&mov	($tmp,$Zhi);
	&shl	(&LB($rem[0]),4);
	&shr	($Zhi,4);

	&xor	($Zlo,"8($Htbl,$nhi[0])");
	&movzw	($rem[0],"($rem_8bit,$rem[0],2)");
	&shl	($tmp,60);

	&xor	($Zhi,"($Htbl,$nhi[0])");
	&xor	($Zlo,$tmp);
	&shl	($rem[0],48);

	&bswap	($Zlo);
	&xor	($Zhi,$rem[0]);

	&bswap	($Zhi);
	&cmp	($inp,$len);
	&jb	(".Louter_loop");
}
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280(%rsp),%rsi
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lghash_epilogue:
	ret
.size	gcm_ghash_4bit,.-gcm_ghash_4bit
___
371 \f
372 ######################################################################
373 # PCLMULQDQ version.
374
# Argument registers for the \@abi-omnipotent PCLMUL entry points below.
@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
		("%rdi","%rsi","%rdx","%rcx");	# Unix order

# xmm register layout shared by the PCLMUL subroutines.
($Xi,$Xhi)=("%xmm0","%xmm1");	$Hkey="%xmm2";
($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
380
# clmul64x64_T2($Xhi,$Xi,$Hkey[,$HK]) - emit a Karatsuba carry-less
# multiplication of the 128-bit value in $Xi by $Hkey; the 256-bit
# (unreduced) product is left in $Xhi:$Xi.  If $HK is supplied it must
# already hold Hkey.lo^Hkey.hi (the precomputed "salt"); otherwise it is
# derived into $T2 here.
sub clmul64x64_T2 {	# minimal register pressure
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pshufd		\$0b01001110,$Hkey,$T2
	pxor		$Xi,$T1			#
	pxor		$Hkey,$T2
___
} else {
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1			#
___
}
$code.=<<___;
	pclmulqdq	\$0x00,$Hkey,$Xi	#######
	pclmulqdq	\$0x11,$Hkey,$Xhi	#######
	pclmulqdq	\$0x00,$HK,$T1		#######
	pxor		$Xi,$T1			#
	pxor		$Xhi,$T1		#

	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
}
413
# reduction_alg9($Xhi,$Xi) - emit the two-phase reduction of the 256-bit
# product in $Xhi:$Xi modulo the GCM polynomial, leaving the reduced
# 128-bit result in $Xi.  Clobbers $T1/$T2.
sub reduction_alg9 {	# 17/11 times faster than Intel version
my ($Xhi,$Xi) = @_;

$code.=<<___;
	# 1st phase
	movdqa		$Xi,$T2			#
	movdqa		$Xi,$T1
	psllq		\$5,$Xi
	pxor		$Xi,$T1			#
	psllq		\$1,$Xi
	pxor		$T1,$Xi			#
	psllq		\$57,$Xi		#
	movdqa		$Xi,$T1			#
	pslldq		\$8,$Xi
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pxor		$T1,$Xhi		#

	# 2nd phase
	movdqa		$Xi,$T2
	psrlq		\$1,$Xi
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi
	pxor		$T2,$Xi			#
	psrlq		\$1,$Xi			#
	pxor		$Xhi,$Xi		#
___
}
443 \f
{ my ($Htbl,$Xip)=@_4args;

# gcm_init_clmul(u128 Htbl[], const u64 Xi[2]) - precompute the PCLMUL key
# schedule from the raw hash key: H (<<1 "twisted"), H^2 and, when
# $do4xaggr, H^3 and H^4, each followed by its Karatsuba "salt" (lo^hi),
# stored at 16-byte offsets 0x00..0x50 of Htbl.
$code.=<<___;
.globl	gcm_init_clmul
.type	gcm_init_clmul,\@abi-omnipotent
.align	16
gcm_init_clmul:
	movdqu		($Xip),$Hkey
	pshufd		\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	pshufd		\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	movdqa		$Hkey,$T1
	psllq		\$1,$Hkey
	pxor		$T3,$T3			#
	psrlq		\$63,$T1
	pcmpgtd		$T2,$T3			# broadcast carry bit
	pslldq		\$8,$T1
	por		$T1,$Hkey		# H<<=1

	# magic reduction
	pand		.L0x1c2_polynomial(%rip),$T3
	pxor		$T3,$Hkey		# if(carry) H^=0x1c2_polynomial

	# calculate H^2
	movdqa		$Hkey,$Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey);
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$Hkey,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$Hkey,$T1		# Karatsuba pre-processing
	movdqu		$Hkey,0x00($Htbl)	# save H
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x10($Htbl)		# save H^2
	palignr		\$8,$T1,$T2		# low part is H.lo^H.hi...
	movdqu		$T2,0x20($Htbl)		# save Karatsuba "salt"
___
if ($do4xaggr) {
	&clmul64x64_T2	($Xhi,$Xi,$Hkey);	# H^3
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	movdqa		$Xi,$T3
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey);	# H^4
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$T3,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$T3,$T1			# Karatsuba pre-processing
	movdqu		$T3,0x30($Htbl)		# save H^3
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x40($Htbl)		# save H^4
	palignr		\$8,$T1,$T2		# low part is H.lo^H.hi...
	movdqu		$T2,0x50($Htbl)		# save Karatsuba "salt"
___
}
$code.=<<___;
	ret
.size	gcm_init_clmul,.-gcm_init_clmul
___
}
507
{ my ($Xip,$Htbl)=@_4args;

# gcm_gmult_clmul(u64 Xi[2], const u128 Htbl[]) - single multiplication of
# Xi by H using PCLMULQDQ; Xi is updated in place.
$code.=<<___;
.globl	gcm_gmult_clmul
.type	gcm_gmult_clmul,\@abi-omnipotent
.align	16
gcm_gmult_clmul:
	movdqu		($Xip),$Xi
	movdqa		.Lbswap_mask(%rip),$T3
	movdqu		($Htbl),$Hkey
	movdqu		0x20($Htbl),$T2
	pshufb		$T3,$Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$T2);
# NOTE: the condition below is constant false, so the experimental heredoc
# is never appended; evaluating it is what emits reduction_alg9's code
# (deliberate side effect).
$code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
	# experimental alternative. special thing about is that there
	# no dependency between the two multiplications... 
	mov		\$`0xE1<<1`,%eax
	mov		\$0xA040608020C0E000,%r10	# ((7..0)·0xE0)&0xff
	mov		\$0x07,%r11d
	movq		%rax,$T1
	movq		%r10,$T2
	movq		%r11,$T3		# borrow $T3
	pand		$Xi,$T3
	pshufb		$T3,$T2			# ($Xi&7)·0xE0
	movq		%rax,$T3
	pclmulqdq	\$0x00,$Xi,$T1		# ·(0xE1<<1)
	pxor		$Xi,$T2
	pslldq		\$15,$T2
	paddd		$T2,$T2			# <<(64+56+1)
	pxor		$T2,$Xi
	pclmulqdq	\$0x01,$T3,$Xi
	movdqa		.Lbswap_mask(%rip),$T3	# reload $T3
	psrldq		\$1,$T1
	pxor		$T1,$Xhi
	pslldq		\$7,$Xi
	pxor		$Xhi,$Xi
___
$code.=<<___;
	pshufb		$T3,$Xi
	movdqu		$Xi,($Xip)
	ret
.size	gcm_gmult_clmul,.-gcm_gmult_clmul
___
}
553 \f
{ my ($Xip,$Htbl,$inp,$len)=@_4args;
  my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(6..10));

# gcm_ghash_clmul(u64 Xi[2], const u128 Htbl[], const u8 *inp, size_t len) -
# streamed GHASH via PCLMULQDQ.  With $do4xaggr, blocks are aggregated 4 at
# a time (.Lmod4_loop, using H..H^4); remainders fall through to the 2x
# (.Lmod_loop/.Leven_tail) and 1x (.Lodd_tail) paths.  len is a multiple
# of 16.
$code.=<<___;
.globl	gcm_ghash_clmul
.type	gcm_ghash_clmul,\@abi-omnipotent
.align	32
gcm_ghash_clmul:
___
# Win64 prologue: xmm6-xmm15 are callee-saved; spill them with hand-encoded
# instructions so the byte offsets match the SEH handler's expectations.
$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	movdqa		.Lbswap_mask(%rip),$T3
	mov		\$0xA040608020C0E000,%rax	# ((7..0)·0xE0)&0xff

	movdqu		($Xip),$Xi
	movdqu		($Htbl),$Hkey
	movdqu		0x20($Htbl),$HK
	pshufb		$T3,$Xi

	sub		\$0x10,$len
	jz		.Lodd_tail

	movdqu		0x10($Htbl),$Hkey2
___
if ($do4xaggr) {
my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));

# 4x aggregated path.  Instructions prefixed by a single space in the
# heredoc belong to the interleaved "other" multiplication stream.
$code.=<<___;
	cmp		\$0x30,$len
	jb		.Lskip4x

	sub		\$0x30,$len
	movdqu		0x30($Htbl),$Hkey3
	movdqu		0x40($Htbl),$Hkey4

	#######
	# Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
	#
	movdqu		0x30($inp),$Xln
	 movdqu		0x20($inp),$Xl
	pshufb		$T3,$Xln
	 pshufb		$T3,$Xl
	movdqa		$Xln,$Xhn
	pshufd		\$0b01001110,$Xln,$Xmn
	pxor		$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	movdqa		$Xl,$Xh
	pshufd		\$0b01001110,$Xl,$Xm
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey2,$Xl
	pclmulqdq	\$0x11,$Hkey2,$Xh
	xorps		$Xl,$Xln
	pclmulqdq	\$0x10,$HK,$Xm
	xorps		$Xh,$Xhn
	movups		0x50($Htbl),$HK
	xorps		$Xm,$Xmn

	movdqu		0x10($inp),$Xl
	 movdqu		0($inp),$T1
	pshufb		$T3,$Xl
	 pshufb		$T3,$T1
	movdqa		$Xl,$Xh
	pshufd		\$0b01001110,$Xl,$Xm
	 pxor		$T1,$Xi
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	 movdqa		$Xi,$Xhi
	 pshufd		\$0b01001110,$Xi,$T1
	 pxor		$Xi,$T1
	pclmulqdq	\$0x11,$Hkey3,$Xh
	xorps		$Xl,$Xln
	pclmulqdq	\$0x00,$HK,$Xm
	xorps		$Xh,$Xhn

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jc	.Ltail4x

	jmp	.Lmod4_loop
.align	32
.Lmod4_loop:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	xorps		$Xm,$Xmn
	 movdqu		0x30($inp),$Xl
	 pshufb		$T3,$Xl
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	xorps		$Xln,$Xi
	 movdqu		0x20($inp),$Xln
	 movdqa		$Xl,$Xh
	 pshufd		\$0b01001110,$Xl,$Xm
	pclmulqdq	\$0x10,$HK,$T1
	xorps		$Xhn,$Xhi
	 pxor		$Xl,$Xm
	 pshufb		$T3,$Xln
	movups		0x20($Htbl),$HK
	 pclmulqdq	\$0x00,$Hkey,$Xl
	xorps		$Xmn,$T1
	 movdqa		$Xln,$Xhn
	 pshufd		\$0b01001110,$Xln,$Xmn

	pxor		$Xi,$T1			# aggregated Karatsuba post-processing
	 pxor		$Xln,$Xmn
	pxor		$Xhi,$T1		#
	movdqa		$T1,$T2			#
	pslldq		\$8,$T1
	 pclmulqdq	\$0x11,$Hkey,$Xh
	psrldq		\$8,$T2			#
	pxor		$T1,$Xi
	movdqa		.L7_mask(%rip),$T1
	pxor		$T2,$Xhi		#
	movq		%rax,$T2

	pand		$Xi,$T1			# 1st phase
	pshufb		$T1,$T2			#
	 pclmulqdq	\$0x00,$HK,$Xm
	pxor		$Xi,$T2			#
	psllq		\$57,$T2		#
	movdqa		$T2,$T1			#
	pslldq		\$8,$T2
	 pclmulqdq	\$0x00,$Hkey2,$Xln
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pxor		$T1,$Xhi		#
	movdqu		0($inp),$T1

	movdqa		$Xi,$T2			# 2nd phase
	psrlq		\$1,$Xi
	 pclmulqdq	\$0x11,$Hkey2,$Xhn
	 xorps		$Xl,$Xln
	 movdqu		0x10($inp),$Xl
	 pshufb		$T3,$Xl
	 pclmulqdq	\$0x10,$HK,$Xmn
	 xorps		$Xh,$Xhn
	 movups		0x50($Htbl),$HK
	pshufb		$T3,$T1
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi

	 movdqa		$Xl,$Xh
	 pxor		$Xm,$Xmn
	 pshufd		\$0b01001110,$Xl,$Xm
	 pxor		$Xl,$Xm
	 pclmulqdq	\$0x00,$Hkey3,$Xl
	pxor		$T2,$Xi			#
	pxor		$T1,$Xhi
	psrlq		\$1,$Xi			#
	 pclmulqdq	\$0x11,$Hkey3,$Xh
	 xorps		$Xl,$Xln
	pxor		$Xhi,$Xi		#

	 pclmulqdq	\$0x00,$HK,$Xm
	 xorps		$Xh,$Xhn

	movdqa		$Xi,$Xhi
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jnc	.Lmod4_loop

.Ltail4x:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	xorps		$Xm,$Xmn
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	xorps		$Xln,$Xi
	pclmulqdq	\$0x10,$HK,$T1
	xorps		$Xhn,$Xhi
	pxor		$Xi,$Xhi		# aggregated Karatsuba post-processing
	pxor		$Xmn,$T1

	pxor		$Xhi,$T1		#
	pxor		$Xi,$Xhi

	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
	&reduction_alg9($Xhi,$Xi);
$code.=<<___;
	add	\$0x40,$len
	jz	.Ldone
	movdqu	0x20($Htbl),$HK
	sub	\$0x10,$len
	jz	.Lodd_tail
.Lskip4x:
___
}
$code.=<<___;
	#######
	# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
	#	[(H*Ii+1) + (H*Xi+1)] mod P =
	#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
	movdqu		($inp),$T1		# Ii
	movdqu		16($inp),$Xln		# Ii+1
	pshufb		$T3,$T1
	pshufb		$T3,$Xln
	pxor		$T1,$Xi			# Ii+Xi

	movdqa		$Xln,$Xhn
	pshufd		\$0b01001110,$Xln,$Xmn
	pxor		$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	movdqa		$Xi,$Xhi
	pshufd		\$0b01001110,$Xi,$T1	#
	pxor		$Xi,$T1			#

	lea		32($inp),$inp		# i+=2
	sub		\$0x20,$len
	jbe		.Leven_tail
	jmp		.Lmod_loop

.align	32
.Lmod_loop:
	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	  movdqu	($inp),$T2		# Ii
	pclmulqdq	\$0x10,$HK,$T1
	  pshufb	$T3,$T2

	pxor		$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	 movdqu		16($inp),$Xln		# Ii+1
	pxor		$Xhn,$Xhi

	pxor		$Xi,$Xmn		# aggregated Karatsuba post-processing
	pxor		$Xhi,$Xmn
	  pxor		$T2,$Xhi		# "Ii+Xi", consume early
	pxor		$Xmn,$T1
	 pshufb		$T3,$Xln
	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#

	movdqa		$Xln,$Xhn		#
	pshufd		\$0b01001110,$Xln,$Xmn
	pxor		$Xln,$Xmn		#

	  movdqa	$Xi,$T2			# 1st phase
	  movdqa	$Xi,$T1
	  psllq		\$5,$Xi
	pclmulqdq	\$0x00,$Hkey,$Xln	#######
	  pxor		$Xi,$T1			#
	  psllq		\$1,$Xi
	  pxor		$T1,$Xi			#
	  psllq		\$57,$Xi		#
	  movdqa	$Xi,$T1			#
	  pslldq	\$8,$Xi
	  psrldq	\$8,$T1			#
	  pxor		$T2,$Xi
	  pxor		$T1,$Xhi		#

	pclmulqdq	\$0x11,$Hkey,$Xhn	#######
	  movdqa	$Xi,$T2			# 2nd phase
	  psrlq		\$1,$Xi
	  pxor		$T2,$Xhi		#
	  pxor		$Xi,$T2
	  psrlq		\$5,$Xi
	  pxor		$T2,$Xi			#
	  psrlq		\$1,$Xi			#
	pclmulqdq	\$0x00,$HK,$Xmn		#######
	  pxor		$Xhi,$Xi		#

	 movdqa		$Xi,$Xhi
	 pshufd		\$0b01001110,$Xi,$T1	#
	 pxor		$Xi,$T1			#

	lea		32($inp),$inp
	sub		\$0x20,$len
	ja		.Lmod_loop

.Leven_tail:
	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$T1

	pxor		$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor		$Xhn,$Xhi
	pxor		$Xi,$Xmn
	pxor		$Xhi,$Xmn
	pxor		$Xmn,$T1
	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
	&reduction_alg9	($Xhi,$Xi);
# $len was pre-decremented by 0x10 on entry, so $len==0 here means exactly
# one 16-byte block remains for .Lodd_tail; non-zero means we are finished.
$code.=<<___;
	test		$len,$len
	jnz		.Ldone

.Lodd_tail:
	movdqu		($inp),$T1		# Ii
	pshufb		$T3,$T1
	pxor		$T1,$Xi			# Ii+Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H*(Ii+Xi)
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
.Ldone:
	pshufb		$T3,$Xi
	movdqu		$Xi,($Xip)
___
# Win64 epilogue: restore the xmm registers spilled in the prologue.
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
___
$code.=<<___;
	ret
.LSEH_end_gcm_ghash_clmul:
.size	gcm_ghash_clmul,.-gcm_ghash_clmul
___
}
903
# Read-only data shared by all code paths: the byte-swap mask and 0x1c2
# reduction constant used by the CLMUL path, 7-bit mask constants, and the
# precomputed 4-bit/8-bit remainder tables consumed by the table-driven
# GHASH routines (presumably emitted earlier in this file -- not visible
# in this region).
904 $code.=<<___;
905 .align  64
906 .Lbswap_mask:
907         .byte   15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
908 .L0x1c2_polynomial:
909         .byte   1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
910 .L7_mask:
911         .long   7,0,7,0
912 .L7_mask_poly:
913         .long   7,0,`0xE1<<1`,0
914 .align  64
915 .type   .Lrem_4bit,\@object
916 .Lrem_4bit:
917         .long   0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
918         .long   0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
919         .long   0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
920         .long   0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
921 .type   .Lrem_8bit,\@object
922 .Lrem_8bit:
923         .value  0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
924         .value  0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
925         .value  0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
926         .value  0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
927         .value  0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
928         .value  0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
929         .value  0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
930         .value  0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
931         .value  0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
932         .value  0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
933         .value  0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
934         .value  0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
935         .value  0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
936         .value  0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
937         .value  0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
938         .value  0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
939         .value  0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
940         .value  0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
941         .value  0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
942         .value  0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
943         .value  0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
944         .value  0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
945         .value  0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
946         .value  0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
947         .value  0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
948         .value  0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
949         .value  0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
950         .value  0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
951         .value  0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
952         .value  0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
953         .value  0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
954         .value  0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
955
956 .asciz  "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
957 .align  64
958 ___
959 \f
960 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
961 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
962 if ($win64) {
963 $rec="%rcx";
964 $frame="%rdx";
965 $context="%r8";
966 $disp="%r9";
967
# se_handler() services the 4-bit routines: if context->Rip lies outside the
# prologue..epilogue window named by HandlerData it recovers rbx/rbp/r12 from
# the stack frame (rsp+24 adjustment), then copies the CONTEXT record and
# chains to RtlVirtualUnwind, returning ExceptionContinueSearch.
# gcm_ghash_clmul uses no handler; its xmm6-xmm15 saves and 0xa8-byte frame
# are described by the version-1 table-driven UNWIND_INFO bytes in
# .LSEH_info_gcm_ghash_clmul at the bottom of the .xdata section.
968 $code.=<<___;
969 .extern __imp_RtlVirtualUnwind
970 .type   se_handler,\@abi-omnipotent
971 .align  16
972 se_handler:
973         push    %rsi
974         push    %rdi
975         push    %rbx
976         push    %rbp
977         push    %r12
978         push    %r13
979         push    %r14
980         push    %r15
981         pushfq
982         sub     \$64,%rsp
983
984         mov     120($context),%rax      # pull context->Rax
985         mov     248($context),%rbx      # pull context->Rip
986
987         mov     8($disp),%rsi           # disp->ImageBase
988         mov     56($disp),%r11          # disp->HandlerData
989
990         mov     0(%r11),%r10d           # HandlerData[0]
991         lea     (%rsi,%r10),%r10        # prologue label
992         cmp     %r10,%rbx               # context->Rip<prologue label
993         jb      .Lin_prologue
994
995         mov     152($context),%rax      # pull context->Rsp
996
997         mov     4(%r11),%r10d           # HandlerData[1]
998         lea     (%rsi,%r10),%r10        # epilogue label
999         cmp     %r10,%rbx               # context->Rip>=epilogue label
1000         jae     .Lin_prologue
1001
1002         lea     24(%rax),%rax           # adjust "rsp"
1003
1004         mov     -8(%rax),%rbx
1005         mov     -16(%rax),%rbp
1006         mov     -24(%rax),%r12
1007         mov     %rbx,144($context)      # restore context->Rbx
1008         mov     %rbp,160($context)      # restore context->Rbp
1009         mov     %r12,216($context)      # restore context->R12
1010
1011 .Lin_prologue:
1012         mov     8(%rax),%rdi
1013         mov     16(%rax),%rsi
1014         mov     %rax,152($context)      # restore context->Rsp
1015         mov     %rsi,168($context)      # restore context->Rsi
1016         mov     %rdi,176($context)      # restore context->Rdi
1017
1018         mov     40($disp),%rdi          # disp->ContextRecord
1019         mov     $context,%rsi           # context
1020         mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
1021         .long   0xa548f3fc              # cld; rep movsq
1022
1023         mov     $disp,%rsi
1024         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1025         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1026         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1027         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1028         mov     40(%rsi),%r10           # disp->ContextRecord
1029         lea     56(%rsi),%r11           # &disp->HandlerData
1030         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1031         mov     %r10,32(%rsp)           # arg5
1032         mov     %r11,40(%rsp)           # arg6
1033         mov     %r12,48(%rsp)           # arg7
1034         mov     %rcx,56(%rsp)           # arg8, (NULL)
1035         call    *__imp_RtlVirtualUnwind(%rip)
1036
1037         mov     \$1,%eax                # ExceptionContinueSearch
1038         add     \$64,%rsp
1039         popfq
1040         pop     %r15
1041         pop     %r14
1042         pop     %r13
1043         pop     %r12
1044         pop     %rbp
1045         pop     %rbx
1046         pop     %rdi
1047         pop     %rsi
1048         ret
1049 .size   se_handler,.-se_handler
1050
1051 .section        .pdata
1052 .align  4
1053         .rva    .LSEH_begin_gcm_gmult_4bit
1054         .rva    .LSEH_end_gcm_gmult_4bit
1055         .rva    .LSEH_info_gcm_gmult_4bit
1056
1057         .rva    .LSEH_begin_gcm_ghash_4bit
1058         .rva    .LSEH_end_gcm_ghash_4bit
1059         .rva    .LSEH_info_gcm_ghash_4bit
1060
1061         .rva    .LSEH_begin_gcm_ghash_clmul
1062         .rva    .LSEH_end_gcm_ghash_clmul
1063         .rva    .LSEH_info_gcm_ghash_clmul
1064
1065 .section        .xdata
1066 .align  8
1067 .LSEH_info_gcm_gmult_4bit:
1068         .byte   9,0,0,0
1069         .rva    se_handler
1070         .rva    .Lgmult_prologue,.Lgmult_epilogue       # HandlerData
1071 .LSEH_info_gcm_ghash_4bit:
1072         .byte   9,0,0,0
1073         .rva    se_handler
1074         .rva    .Lghash_prologue,.Lghash_epilogue       # HandlerData
1075 .LSEH_info_gcm_ghash_clmul:
1076         .byte   0x01,0x33,0x16,0x00
1077         .byte   0x33,0xf8,0x09,0x00     #movaps 0x90(rsp),xmm15
1078         .byte   0x2e,0xe8,0x08,0x00     #movaps 0x80(rsp),xmm14
1079         .byte   0x29,0xd8,0x07,0x00     #movaps 0x70(rsp),xmm13
1080         .byte   0x24,0xc8,0x06,0x00     #movaps 0x60(rsp),xmm12
1081         .byte   0x1f,0xb8,0x05,0x00     #movaps 0x50(rsp),xmm11
1082         .byte   0x1a,0xa8,0x04,0x00     #movaps 0x40(rsp),xmm10
1083         .byte   0x15,0x98,0x03,0x00     #movaps 0x30(rsp),xmm9
1084         .byte   0x10,0x88,0x02,0x00     #movaps 0x20(rsp),xmm8
1085         .byte   0x0c,0x78,0x01,0x00     #movaps 0x10(rsp),xmm7
1086         .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
1087         .byte   0x04,0x01,0x15,0x00     #sub    0xa8,rsp
1088 ___
1089 }
1090 \f
# Expand `...` constructs left in the generated text -- compile-time
# constant expressions such as `0xE1<<1` or `1232/8` -- by evaluating
# each backtick-delimited span as Perl.
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

# Emit the assembly on stdout for the assembler/translator to consume.
print $code;

# Check the final flush: a failed close (full disk, broken pipe) would
# otherwise let the build silently assemble truncated output.
close STDOUT or die "error closing STDOUT: $!";