aes-x86_64.pl: Atom-specific optimizations, +10%.
[openssl.git] / crypto / aes / asm / aes-x86_64.pl
index c2b040a84fbfcbeff3cab105876e767e62c400b0..01f9cb7f8e020063dda8c57446c4d1f20127966f 100755 (executable)
 # Performance in number of cycles per processed byte for 128-bit key:
 #
 #              ECB encrypt     ECB decrypt     CBC large chunk
-# AMD64                33              41              13.0
-# EM64T                38              59              18.6(*)
-# Core 2       30              43              14.5(*)
+# AMD64                33              43              13.0
+# EM64T                38              56              18.6(*)
+# Core 2       30              42              14.5(*)
+# Atom         65              86              32.1(*)
 #
 # (*) with hyper-threading off
 
-$verticalspin=1;       # unlike 32-bit version $verticalspin performs
-                       # ~15% better on both AMD and Intel cores
-$speed_limit=512;      # see aes-586.pl for details
-$output=shift;
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
 
 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
 die "can't locate x86_64-xlate.pl";
 
-open STDOUT,"| $^X $xlate $output";
+open STDOUT,"| \"$^X\" $xlate $flavour $output";
+
+$verticalspin=1;       # unlike 32-bit version $verticalspin performs
+                       # ~15% better on both AMD and Intel cores
+$speed_limit=512;      # see aes-586.pl for details
 
 $code=".text\n";
 
@@ -360,68 +366,66 @@ $code.=<<___;
        movzb   `&lo("$s0")`,$t0
        movzb   `&lo("$s1")`,$t1
        movzb   `&lo("$s2")`,$t2
-       movzb   ($sbox,$t0,1),$t0
-       movzb   ($sbox,$t1,1),$t1
-       movzb   ($sbox,$t2,1),$t2
-
        movzb   `&lo("$s3")`,$t3
        movzb   `&hi("$s1")`,$acc0
        movzb   `&hi("$s2")`,$acc1
+       shr     \$16,$s2
+       movzb   `&hi("$s3")`,$acc2
+       movzb   ($sbox,$t0,1),$t0
+       movzb   ($sbox,$t1,1),$t1
+       movzb   ($sbox,$t2,1),$t2
        movzb   ($sbox,$t3,1),$t3
-       movzb   ($sbox,$acc0,1),$t4     #$t0
-       movzb   ($sbox,$acc1,1),$t5     #$t1
 
-       movzb   `&hi("$s3")`,$acc2
+       movzb   ($sbox,$acc0,1),$t4     #$t0
        movzb   `&hi("$s0")`,$acc0
-       shr     \$16,$s2
+       movzb   ($sbox,$acc1,1),$t5     #$t1
+       movzb   `&lo("$s2")`,$acc1
        movzb   ($sbox,$acc2,1),$acc2   #$t2
        movzb   ($sbox,$acc0,1),$acc0   #$t3
-       shr     \$16,$s3
 
-       movzb   `&lo("$s2")`,$acc1
        shl     \$8,$t4
+       shr     \$16,$s3
        shl     \$8,$t5
-       movzb   ($sbox,$acc1,1),$acc1   #$t0
        xor     $t4,$t0
-       xor     $t5,$t1
-
-       movzb   `&lo("$s3")`,$t4
        shr     \$16,$s0
+       movzb   `&lo("$s3")`,$t4
        shr     \$16,$s1
-       movzb   `&lo("$s0")`,$t5
+       xor     $t5,$t1
        shl     \$8,$acc2
-       shl     \$8,$acc0
-       movzb   ($sbox,$t4,1),$t4       #$t1
-       movzb   ($sbox,$t5,1),$t5       #$t2
+       movzb   `&lo("$s0")`,$t5
+       movzb   ($sbox,$acc1,1),$acc1   #$t0
        xor     $acc2,$t2
-       xor     $acc0,$t3
 
+       shl     \$8,$acc0
        movzb   `&lo("$s1")`,$acc2
-       movzb   `&hi("$s3")`,$acc0
        shl     \$16,$acc1
-       movzb   ($sbox,$acc2,1),$acc2   #$t3
-       movzb   ($sbox,$acc0,1),$acc0   #$t0
+       xor     $acc0,$t3
+       movzb   ($sbox,$t4,1),$t4       #$t1
+       movzb   `&hi("$s3")`,$acc0
+       movzb   ($sbox,$t5,1),$t5       #$t2
        xor     $acc1,$t0
 
-       movzb   `&hi("$s0")`,$acc1
        shr     \$8,$s2
+       movzb   `&hi("$s0")`,$acc1
+       shl     \$16,$t4
        shr     \$8,$s1
+       shl     \$16,$t5
+       xor     $t4,$t1
+       movzb   ($sbox,$acc2,1),$acc2   #$t3
+       movzb   ($sbox,$acc0,1),$acc0   #$t0
        movzb   ($sbox,$acc1,1),$acc1   #$t1
        movzb   ($sbox,$s2,1),$s3       #$t3
        movzb   ($sbox,$s1,1),$s2       #$t2
-       shl     \$16,$t4
-       shl     \$16,$t5
+
        shl     \$16,$acc2
-       xor     $t4,$t1
        xor     $t5,$t2
-       xor     $acc2,$t3
-
        shl     \$24,$acc0
+       xor     $acc2,$t3
        shl     \$24,$acc1
-       shl     \$24,$s3
        xor     $acc0,$t0
-       shl     \$24,$s2
+       shl     \$24,$s3
        xor     $acc1,$t1
+       shl     \$24,$s2
        mov     $t0,$s0
        mov     $t1,$s1
        xor     $t2,$s2
@@ -460,12 +464,12 @@ sub enctransform()
 { my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
 
 $code.=<<___;
-       mov     $s0,$acc0
-       mov     $s1,$acc1
-       and     \$0x80808080,$acc0
-       and     \$0x80808080,$acc1
-       mov     $acc0,$t0
-       mov     $acc1,$t1
+       mov     \$0x80808080,$t0
+       mov     \$0x80808080,$t1
+       and     $s0,$t0
+       and     $s1,$t1
+       mov     $t0,$acc0
+       mov     $t1,$acc1
        shr     \$7,$t0
        lea     ($s0,$s0),$r20
        shr     \$7,$t1
@@ -483,25 +487,25 @@ $code.=<<___;
 
        xor     $r20,$s0
        xor     $r21,$s1
-        mov    $s2,$acc0
-        mov    $s3,$acc1
+        mov    \$0x80808080,$t2
        rol     \$24,$s0
+        mov    \$0x80808080,$t3
        rol     \$24,$s1
-        and    \$0x80808080,$acc0
-        and    \$0x80808080,$acc1
+        and    $s2,$t2
+        and    $s3,$t3
        xor     $r20,$s0
        xor     $r21,$s1
-        mov    $acc0,$t2
-        mov    $acc1,$t3
+        mov    $t2,$acc0
        ror     \$16,$t0
+        mov    $t3,$acc1
        ror     \$16,$t1
-        shr    \$7,$t2
         lea    ($s2,$s2),$r20
+        shr    \$7,$t2
        xor     $t0,$s0
-       xor     $t1,$s1
         shr    \$7,$t3
-        lea    ($s3,$s3),$r21
+       xor     $t1,$s1
        ror     \$8,$t0
+        lea    ($s3,$s3),$r21
        ror     \$8,$t1
         sub    $t2,$acc0
         sub    $t3,$acc1
@@ -517,23 +521,23 @@ $code.=<<___;
        xor     $acc0,$r20
        xor     $acc1,$r21
 
+       ror     \$16,$t2
        xor     $r20,$s2
+       ror     \$16,$t3
        xor     $r21,$s3
        rol     \$24,$s2
+       mov     0($sbox),$acc0                  # prefetch Te4
        rol     \$24,$s3
        xor     $r20,$s2
-       xor     $r21,$s3
-       mov     0($sbox),$acc0                  # prefetch Te4
-       ror     \$16,$t2
-       ror     \$16,$t3
        mov     64($sbox),$acc1
-       xor     $t2,$s2
-       xor     $t3,$s3
+       xor     $r21,$s3
        mov     128($sbox),$r20
+       xor     $t2,$s2
        ror     \$8,$t2
+       xor     $t3,$s3
        ror     \$8,$t3
-       mov     192($sbox),$r21
        xor     $t2,$s2
+       mov     192($sbox),$r21
        xor     $t3,$s3
 ___
 }
@@ -583,6 +587,9 @@ $code.=<<___;
 .globl AES_encrypt
 .type  AES_encrypt,\@function,3
 .align 16
+.globl asm_AES_encrypt
+.hidden        asm_AES_encrypt
+asm_AES_encrypt:
 AES_encrypt:
        push    %rbx
        push    %rbp
@@ -592,18 +599,20 @@ AES_encrypt:
        push    %r15
 
        # allocate frame "above" key schedule
-       mov     %rsp,%rax
-       mov     %rdx,$key
-       lea     -63(%rdx),%rcx
+       mov     %rsp,%r10
+       lea     -63(%rdx),%rcx  # %rdx is key argument
        and     \$-64,%rsp
        sub     %rsp,%rcx
        neg     %rcx
        and     \$0x3c0,%rcx
        sub     %rcx,%rsp
+       sub     \$32,%rsp
 
-       push    %rax            # save real stack pointer
-       push    %rsi            # save out
+       mov     %rsi,16(%rsp)   # save out
+       mov     %r10,24(%rsp)   # save real stack pointer
+.Lenc_prologue:
 
+       mov     %rdx,$key
        mov     240($key),$rnds # load rounds
 
        mov     0(%rdi),$s0     # load input vector
@@ -613,8 +622,8 @@ AES_encrypt:
 
        shl     \$4,$rnds
        lea     ($key,$rnds),%rbp
-       push    %rbp
-       push    $key
+       mov     $key,(%rsp)     # key schedule
+       mov     %rbp,8(%rsp)    # end of key schedule
 
        # pick Te4 copy which can't "overlap" with stack frame or key schedule
        lea     .LAES_Te+2048(%rip),$sbox
@@ -626,18 +635,20 @@ AES_encrypt:
        call    _x86_64_AES_encrypt_compact
 
        mov     16(%rsp),$out   # restore out
-       mov     24(%rsp),%rsp
+       mov     24(%rsp),%rsi   # restore saved stack pointer
        mov     $s0,0($out)     # write output vector
        mov     $s1,4($out)
        mov     $s2,8($out)
        mov     $s3,12($out)
 
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
+       mov     (%rsi),%r15
+       mov     8(%rsi),%r14
+       mov     16(%rsi),%r13
+       mov     24(%rsi),%r12
+       mov     32(%rsi),%rbp
+       mov     40(%rsi),%rbx
+       lea     48(%rsi),%rsp
+.Lenc_epilogue:
        ret
 .size  AES_encrypt,.-AES_encrypt
 ___
@@ -923,70 +934,69 @@ $code.=<<___;
        movzb   `&lo("$s0")`,$t0
        movzb   `&lo("$s1")`,$t1
        movzb   `&lo("$s2")`,$t2
-       movzb   ($sbox,$t0,1),$t0
-       movzb   ($sbox,$t1,1),$t1
-       movzb   ($sbox,$t2,1),$t2
-
        movzb   `&lo("$s3")`,$t3
        movzb   `&hi("$s3")`,$acc0
        movzb   `&hi("$s0")`,$acc1
+       shr     \$16,$s3
+       movzb   `&hi("$s1")`,$acc2
+       movzb   ($sbox,$t0,1),$t0
+       movzb   ($sbox,$t1,1),$t1
+       movzb   ($sbox,$t2,1),$t2
        movzb   ($sbox,$t3,1),$t3
-       movzb   ($sbox,$acc0,1),$t4     #$t0
-       movzb   ($sbox,$acc1,1),$t5     #$t1
 
-       movzb   `&hi("$s1")`,$acc2
+       movzb   ($sbox,$acc0,1),$t4     #$t0
        movzb   `&hi("$s2")`,$acc0
-       shr     \$16,$s2
+       movzb   ($sbox,$acc1,1),$t5     #$t1
        movzb   ($sbox,$acc2,1),$acc2   #$t2
        movzb   ($sbox,$acc0,1),$acc0   #$t3
-       shr     \$16,$s3
 
-       movzb   `&lo("$s2")`,$acc1
-       shl     \$8,$t4
+       shr     \$16,$s2
        shl     \$8,$t5
-       movzb   ($sbox,$acc1,1),$acc1   #$t0
-       xor     $t4,$t0
-       xor     $t5,$t1
-
-       movzb   `&lo("$s3")`,$t4
+       shl     \$8,$t4
+       movzb   `&lo("$s2")`,$acc1
        shr     \$16,$s0
+       xor     $t4,$t0
        shr     \$16,$s1
-       movzb   `&lo("$s0")`,$t5
+       movzb   `&lo("$s3")`,$t4
+
        shl     \$8,$acc2
+       xor     $t5,$t1
        shl     \$8,$acc0
-       movzb   ($sbox,$t4,1),$t4       #$t1
-       movzb   ($sbox,$t5,1),$t5       #$t2
+       movzb   `&lo("$s0")`,$t5
+       movzb   ($sbox,$acc1,1),$acc1   #$t0
        xor     $acc2,$t2
-       xor     $acc0,$t3
-
        movzb   `&lo("$s1")`,$acc2
-       movzb   `&hi("$s1")`,$acc0
+
        shl     \$16,$acc1
+       xor     $acc0,$t3
+       movzb   ($sbox,$t4,1),$t4       #$t1
+       movzb   `&hi("$s1")`,$acc0
        movzb   ($sbox,$acc2,1),$acc2   #$t3
-       movzb   ($sbox,$acc0,1),$acc0   #$t0
        xor     $acc1,$t0
-
+       movzb   ($sbox,$t5,1),$t5       #$t2
        movzb   `&hi("$s2")`,$acc1
+
+       shl     \$16,$acc2
        shl     \$16,$t4
        shl     \$16,$t5
-       movzb   ($sbox,$acc1,1),$s1     #$t1
+       xor     $acc2,$t3
+       movzb   `&hi("$s3")`,$acc2
        xor     $t4,$t1
+       shr     \$8,$s0
        xor     $t5,$t2
 
-       movzb   `&hi("$s3")`,$acc1
-       shr     \$8,$s0
-       shl     \$16,$acc2
-       movzb   ($sbox,$acc1,1),$s2     #$t2
+       movzb   ($sbox,$acc0,1),$acc0   #$t0
+       movzb   ($sbox,$acc1,1),$s1     #$t1
+       movzb   ($sbox,$acc2,1),$s2     #$t2
        movzb   ($sbox,$s0,1),$s3       #$t3
-       xor     $acc2,$t3
 
+       mov     $t0,$s0
        shl     \$24,$acc0
        shl     \$24,$s1
        shl     \$24,$s2
-       xor     $acc0,$t0
+       xor     $acc0,$s0
        shl     \$24,$s3
        xor     $t1,$s1
-       mov     $t0,$s0
        xor     $t2,$s2
        xor     $t3,$s3
 ___
@@ -1001,12 +1011,12 @@ sub dectransform()
   my $prefetch = shift;
 
 $code.=<<___;
-       mov     $tp10,$acc0
-       mov     $tp18,$acc8
-       and     $mask80,$acc0
-       and     $mask80,$acc8
-       mov     $acc0,$tp40
-       mov     $acc8,$tp48
+       mov     $mask80,$tp40
+       mov     $mask80,$tp48
+       and     $tp10,$tp40
+       and     $tp18,$tp48
+       mov     $tp40,$acc0
+       mov     $tp48,$acc8
        shr     \$7,$tp40
        lea     ($tp10,$tp10),$tp20
        shr     \$7,$tp48
@@ -1017,15 +1027,15 @@ $code.=<<___;
        and     $maskfe,$tp28
        and     $mask1b,$acc0
        and     $mask1b,$acc8
-       xor     $tp20,$acc0
-       xor     $tp28,$acc8
-       mov     $acc0,$tp20
-       mov     $acc8,$tp28
-
-       and     $mask80,$acc0
-       and     $mask80,$acc8
-       mov     $acc0,$tp80
-       mov     $acc8,$tp88
+       xor     $acc0,$tp20
+       xor     $acc8,$tp28
+       mov     $mask80,$tp80
+       mov     $mask80,$tp88
+
+       and     $tp20,$tp80
+       and     $tp28,$tp88
+       mov     $tp80,$acc0
+       mov     $tp88,$acc8
        shr     \$7,$tp80
        lea     ($tp20,$tp20),$tp40
        shr     \$7,$tp88
@@ -1036,15 +1046,15 @@ $code.=<<___;
        and     $maskfe,$tp48
        and     $mask1b,$acc0
        and     $mask1b,$acc8
-       xor     $tp40,$acc0
-       xor     $tp48,$acc8
-       mov     $acc0,$tp40
-       mov     $acc8,$tp48
-
-       and     $mask80,$acc0
-       and     $mask80,$acc8
-       mov     $acc0,$tp80
-       mov     $acc8,$tp88
+       xor     $acc0,$tp40
+       xor     $acc8,$tp48
+       mov     $mask80,$tp80
+       mov     $mask80,$tp88
+
+       and     $tp40,$tp80
+       and     $tp48,$tp88
+       mov     $tp80,$acc0
+       mov     $tp88,$acc8
        shr     \$7,$tp80
         xor    $tp10,$tp20             # tp2^=tp1
        shr     \$7,$tp88
@@ -1069,51 +1079,51 @@ $code.=<<___;
        mov     $tp10,$acc0
        mov     $tp18,$acc8
        xor     $tp80,$tp40             # tp4^tp1^=tp8
-       xor     $tp88,$tp48             # tp4^tp1^=tp8
        shr     \$32,$acc0
+       xor     $tp88,$tp48             # tp4^tp1^=tp8
        shr     \$32,$acc8
        xor     $tp20,$tp80             # tp8^=tp8^tp2^tp1=tp2^tp1
-       xor     $tp28,$tp88             # tp8^=tp8^tp2^tp1=tp2^tp1
        rol     \$8,`&LO("$tp10")`      # ROTATE(tp1^tp8,8)
+       xor     $tp28,$tp88             # tp8^=tp8^tp2^tp1=tp2^tp1
        rol     \$8,`&LO("$tp18")`      # ROTATE(tp1^tp8,8)
        xor     $tp40,$tp80             # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
+       rol     \$8,`&LO("$acc0")`      # ROTATE(tp1^tp8,8)
        xor     $tp48,$tp88             # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
 
-       rol     \$8,`&LO("$acc0")`      # ROTATE(tp1^tp8,8)
        rol     \$8,`&LO("$acc8")`      # ROTATE(tp1^tp8,8)
        xor     `&LO("$tp80")`,`&LO("$tp10")`
-       xor     `&LO("$tp88")`,`&LO("$tp18")`
        shr     \$32,$tp80
+       xor     `&LO("$tp88")`,`&LO("$tp18")`
        shr     \$32,$tp88
        xor     `&LO("$tp80")`,`&LO("$acc0")`
        xor     `&LO("$tp88")`,`&LO("$acc8")`
 
        mov     $tp20,$tp80
-       mov     $tp28,$tp88
-       shr     \$32,$tp80
-       shr     \$32,$tp88
        rol     \$24,`&LO("$tp20")`     # ROTATE(tp2^tp1^tp8,24)
+       mov     $tp28,$tp88
        rol     \$24,`&LO("$tp28")`     # ROTATE(tp2^tp1^tp8,24)
-       rol     \$24,`&LO("$tp80")`     # ROTATE(tp2^tp1^tp8,24)
-       rol     \$24,`&LO("$tp88")`     # ROTATE(tp2^tp1^tp8,24)
+       shr     \$32,$tp80
        xor     `&LO("$tp20")`,`&LO("$tp10")`
+       shr     \$32,$tp88
        xor     `&LO("$tp28")`,`&LO("$tp18")`
+       rol     \$24,`&LO("$tp80")`     # ROTATE(tp2^tp1^tp8,24)
        mov     $tp40,$tp20
+       rol     \$24,`&LO("$tp88")`     # ROTATE(tp2^tp1^tp8,24)
        mov     $tp48,$tp28
+       shr     \$32,$tp20
        xor     `&LO("$tp80")`,`&LO("$acc0")`
+       shr     \$32,$tp28
        xor     `&LO("$tp88")`,`&LO("$acc8")`
 
        `"mov   0($sbox),$mask80"       if ($prefetch)`
-       shr     \$32,$tp20
-       shr     \$32,$tp28
-       `"mov   64($sbox),$maskfe"      if ($prefetch)`
        rol     \$16,`&LO("$tp40")`     # ROTATE(tp4^tp1^tp8,16)
+       `"mov   64($sbox),$maskfe"      if ($prefetch)`
        rol     \$16,`&LO("$tp48")`     # ROTATE(tp4^tp1^tp8,16)
        `"mov   128($sbox),$mask1b"     if ($prefetch)`
        rol     \$16,`&LO("$tp20")`     # ROTATE(tp4^tp1^tp8,16)
-       rol     \$16,`&LO("$tp28")`     # ROTATE(tp4^tp1^tp8,16)
        `"mov   192($sbox),$tp80"       if ($prefetch)`
        xor     `&LO("$tp40")`,`&LO("$tp10")`
+       rol     \$16,`&LO("$tp28")`     # ROTATE(tp4^tp1^tp8,16)
        xor     `&LO("$tp48")`,`&LO("$tp18")`
        `"mov   256($sbox),$tp88"       if ($prefetch)`
        xor     `&LO("$tp20")`,`&LO("$acc0")`
@@ -1175,6 +1185,9 @@ $code.=<<___;
 .globl AES_decrypt
 .type  AES_decrypt,\@function,3
 .align 16
+.globl asm_AES_decrypt
+.hidden        asm_AES_decrypt
+asm_AES_decrypt:
 AES_decrypt:
        push    %rbx
        push    %rbp
@@ -1184,18 +1197,20 @@ AES_decrypt:
        push    %r15
 
        # allocate frame "above" key schedule
-       mov     %rsp,%rax
-       mov     %rdx,$key
-       lea     -63(%rdx),%rcx
+       mov     %rsp,%r10
+       lea     -63(%rdx),%rcx  # %rdx is key argument
        and     \$-64,%rsp
        sub     %rsp,%rcx
        neg     %rcx
        and     \$0x3c0,%rcx
        sub     %rcx,%rsp
+       sub     \$32,%rsp
 
-       push    %rax            # save real stack pointer
-       push    %rsi            # save out
+       mov     %rsi,16(%rsp)   # save out
+       mov     %r10,24(%rsp)   # save real stack pointer
+.Ldec_prologue:
 
+       mov     %rdx,$key
        mov     240($key),$rnds # load rounds
 
        mov     0(%rdi),$s0     # load input vector
@@ -1205,8 +1220,8 @@ AES_decrypt:
 
        shl     \$4,$rnds
        lea     ($key,$rnds),%rbp
-       push    %rbp
-       push    $key
+       mov     $key,(%rsp)     # key schedule
+       mov     %rbp,8(%rsp)    # end of key schedule
 
        # pick Td4 copy which can't "overlap" with stack frame or key schedule
        lea     .LAES_Td+2048(%rip),$sbox
@@ -1220,18 +1235,20 @@ AES_decrypt:
        call    _x86_64_AES_decrypt_compact
 
        mov     16(%rsp),$out   # restore out
-       mov     24(%rsp),%rsp
+       mov     24(%rsp),%rsi   # restore saved stack pointer
        mov     $s0,0($out)     # write output vector
        mov     $s1,4($out)
        mov     $s2,8($out)
        mov     $s3,12($out)
 
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
+       mov     (%rsi),%r15
+       mov     8(%rsi),%r14
+       mov     16(%rsi),%r13
+       mov     24(%rsi),%r12
+       mov     32(%rsi),%rbp
+       mov     40(%rsi),%rbx
+       lea     48(%rsi),%rsp
+.Ldec_epilogue:
        ret
 .size  AES_decrypt,.-AES_decrypt
 ___
@@ -1271,16 +1288,27 @@ $code.=<<___;
 .type  AES_set_encrypt_key,\@function,3
 .align 16
 AES_set_encrypt_key:
+       push    %rbx
+       push    %rbp
+       push    %r12                    # redundant, but allows sharing the
+       push    %r13                    # exception handler...
+       push    %r14
+       push    %r15
+       sub     \$8,%rsp
+.Lenc_key_prologue:
+
        call    _x86_64_AES_set_encrypt_key
+
+       mov     40(%rsp),%rbp
+       mov     48(%rsp),%rbx
+       add     \$56,%rsp
+.Lenc_key_epilogue:
        ret
 .size  AES_set_encrypt_key,.-AES_set_encrypt_key
 
 .type  _x86_64_AES_set_encrypt_key,\@abi-omnipotent
 .align 16
 _x86_64_AES_set_encrypt_key:
-       push    %rbx
-       push    %rbp
-
        mov     %esi,%ecx                       # %ecx=bits
        mov     %rdi,%rsi                       # %rsi=userKey
        mov     %rdx,%rdi                       # %rdi=key
@@ -1461,8 +1489,6 @@ $code.=<<___;
 .Lbadpointer:
        mov     \$-1,%rax
 .Lexit:
-       pop     %rbp
-       pop     %rbx
        .byte   0xf3,0xc3                       # rep ret
 .size  _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
 ___
@@ -1528,18 +1554,19 @@ $code.=<<___;
 .type  AES_set_decrypt_key,\@function,3
 .align 16
 AES_set_decrypt_key:
-       push    %rdx                    # save key schedule
-       call    _x86_64_AES_set_encrypt_key
-       cmp     \$0,%eax
-       pop     %r8                     # restore key schedule
-       jne     .Labort
-
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
+       push    %rdx                    # save key schedule
+.Ldec_key_prologue:
+
+       call    _x86_64_AES_set_encrypt_key
+       mov     (%rsp),%r8              # restore key schedule
+       cmp     \$0,%eax
+       jne     .Labort
 
        mov     240(%r8),%r14d          # pull number of rounds
        xor     %rdi,%rdi
@@ -1585,13 +1612,15 @@ $code.=<<___;
        jnz     .Lpermute
 
        xor     %rax,%rax
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
 .Labort:
+       mov     8(%rsp),%r15
+       mov     16(%rsp),%r14
+       mov     24(%rsp),%r13
+       mov     32(%rsp),%r12
+       mov     40(%rsp),%rbp
+       mov     48(%rsp),%rbx
+       add     \$56,%rsp
+.Ldec_key_epilogue:
        ret
 .size  AES_set_decrypt_key,.-AES_set_decrypt_key
 ___
@@ -1619,16 +1648,21 @@ $code.=<<___;
 .type  AES_cbc_encrypt,\@function,6
 .align 16
 .extern        OPENSSL_ia32cap_P
+.globl asm_AES_cbc_encrypt
+.hidden        asm_AES_cbc_encrypt
+asm_AES_cbc_encrypt:
 AES_cbc_encrypt:
        cmp     \$0,%rdx        # check length
-       je      .Lcbc_just_ret
+       je      .Lcbc_epilogue
+       pushfq
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
-       pushfq
+.Lcbc_prologue:
+
        cld
        mov     %r9d,%r9d       # clear upper half of enc
 
@@ -1638,13 +1672,13 @@ AES_cbc_encrypt:
        lea     .LAES_Td(%rip),$sbox
 .Lcbc_picked_te:
 
-       mov     OPENSSL_ia32cap_P(%rip),%eax
+       mov     OPENSSL_ia32cap_P(%rip),%r10d
        cmp     \$$speed_limit,%rdx
-       jb      .Lcbc_slow_way
+       jb      .Lcbc_slow_prologue
        test    \$15,%rdx
-       jnz     .Lcbc_slow_way
-       bt      \$28,%eax
-       jc      .Lcbc_slow_way
+       jnz     .Lcbc_slow_prologue
+       bt      \$28,%r10d
+       jc      .Lcbc_slow_prologue
 
        # allocate aligned stack frame...
        lea     -88-248(%rsp),$key
@@ -1672,8 +1706,9 @@ AES_cbc_encrypt:
 .Lcbc_te_ok:
 
        xchg    %rsp,$key
-       add     \$8,%rsp        # reserve for return address!
+       #add    \$8,%rsp        # reserve for return address!
        mov     $key,$_rsp      # save %rsp
+.Lcbc_fast_body:
        mov     %rdi,$_inp      # save copy of inp
        mov     %rsi,$_out      # save copy of out
        mov     %rdx,$_len      # save copy of len
@@ -1757,25 +1792,7 @@ AES_cbc_encrypt:
        mov     $s2,8(%rbp)
        mov     $s3,12(%rbp)
 
-.align 4
-.Lcbc_cleanup:
-       cmpl    \$0,$mark       # was the key schedule copied?
-       lea     $aes_key,%rdi
-       je      .Lcbc_exit
-               mov     \$240/8,%ecx
-               xor     %rax,%rax
-               .long   0x90AB48F3      # rep stosq
-.Lcbc_exit:
-       mov     $_rsp,%rsp
-       popfq
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
-.Lcbc_just_ret:
-       ret
+       jmp     .Lcbc_fast_cleanup
 
 #----------------------------- DECRYPT -----------------------------#
 .align 16
@@ -1821,7 +1838,7 @@ AES_cbc_encrypt:
        mov     8(%rbp),%r11
        mov     %r10,0(%r12)            # copy back to user
        mov     %r11,8(%r12)
-       jmp     .Lcbc_cleanup
+       jmp     .Lcbc_fast_cleanup
 
 .align 16
 .Lcbc_fast_dec_in_place:
@@ -1874,24 +1891,34 @@ AES_cbc_encrypt:
        mov     $s2,8($out)
        mov     $s3,12($out)
 
-       jmp     .Lcbc_cleanup
+.align 4
+.Lcbc_fast_cleanup:
+       cmpl    \$0,$mark       # was the key schedule copied?
+       lea     $aes_key,%rdi
+       je      .Lcbc_exit
+               mov     \$240/8,%ecx
+               xor     %rax,%rax
+               .long   0x90AB48F3      # rep stosq
+
+       jmp     .Lcbc_exit
 
 #--------------------------- SLOW ROUTINE ---------------------------#
 .align 16
-.Lcbc_slow_way:
+.Lcbc_slow_prologue:
        # allocate aligned stack frame...
        lea     -88(%rsp),%rbp
        and     \$-64,%rbp
        # ... just "above" key schedule
-       lea     -88-63(%rcx),%rax
-       sub     %rbp,%rax
-       neg     %rax
-       and     \$0x3c0,%rax
-       sub     %rax,%rbp
+       lea     -88-63(%rcx),%r10
+       sub     %rbp,%r10
+       neg     %r10
+       and     \$0x3c0,%r10
+       sub     %r10,%rbp
 
        xchg    %rsp,%rbp
-       add     \$8,%rsp        # reserve for return address!
+       #add    \$8,%rsp        # reserve for return address!
        mov     %rbp,$_rsp      # save %rsp
+.Lcbc_slow_body:
        #mov    %rdi,$_inp      # save copy of inp
        #mov    %rsi,$_out      # save copy of out
        #mov    %rdx,$_len      # save copy of len
@@ -1963,8 +1990,11 @@ AES_cbc_encrypt:
        mov     $s3,12(%rbp)
 
        jmp     .Lcbc_exit
+
 .align 4
 .Lcbc_slow_enc_tail:
+       mov     %rax,%r11
+       mov     %rcx,%r12
        mov     %r10,%rcx
        mov     $inp,%rsi
        mov     $out,%rdi
@@ -1975,6 +2005,8 @@ AES_cbc_encrypt:
        .long   0x9066AAF3              # rep stosb
        mov     $out,$inp               # this is not a mistake!
        mov     \$16,%r10               # len=16
+       mov     %r11,%rax
+       mov     %r12,%rcx
        jmp     .Lcbc_slow_enc_loop     # one more spin...
 #--------------------------- SLOW DECRYPT ---------------------------#
 .align 16
@@ -2053,6 +2085,21 @@ AES_cbc_encrypt:
        lea     16(%r10),%rcx
        .long   0x9066A4F3      # rep movsb
        jmp     .Lcbc_exit
+
+.align 16
+.Lcbc_exit:
+       mov     $_rsp,%rsi
+       mov     (%rsi),%r15
+       mov     8(%rsi),%r14
+       mov     16(%rsi),%r13
+       mov     24(%rsi),%r12
+       mov     32(%rsi),%rbp
+       mov     40(%rsi),%rbx
+       lea     48(%rsi),%rsp
+.Lcbc_popfq:
+       popfq
+.Lcbc_epilogue:
+       ret
 .size  AES_cbc_encrypt,.-AES_cbc_encrypt
 ___
 }
@@ -2482,6 +2529,282 @@ $code.=<<___;
 .align 64
 ___
 
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#              CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern        __imp_RtlVirtualUnwind
+.type  block_se_handler,\@abi-omnipotent
+.align 16
+block_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lin_block_prologue
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lin_block_prologue
+
+       mov     24(%rax),%rax           # pull saved real stack pointer
+       lea     48(%rax),%rax           # adjust...
+
+       mov     -8(%rax),%rbx
+       mov     -16(%rax),%rbp
+       mov     -24(%rax),%r12
+       mov     -32(%rax),%r13
+       mov     -40(%rax),%r14
+       mov     -48(%rax),%r15
+       mov     %rbx,144($context)      # restore context->Rbx
+       mov     %rbp,160($context)      # restore context->Rbp
+       mov     %r12,216($context)      # restore context->R12
+       mov     %r13,224($context)      # restore context->R13
+       mov     %r14,232($context)      # restore context->R14
+       mov     %r15,240($context)      # restore context->R15
+
+.Lin_block_prologue:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+       jmp     .Lcommon_seh_exit
+.size  block_se_handler,.-block_se_handler
+
+.type  key_se_handler,\@abi-omnipotent
+.align 16
+key_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lin_key_prologue
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lin_key_prologue
+
+       lea     56(%rax),%rax
+
+       mov     -8(%rax),%rbx
+       mov     -16(%rax),%rbp
+       mov     -24(%rax),%r12
+       mov     -32(%rax),%r13
+       mov     -40(%rax),%r14
+       mov     -48(%rax),%r15
+       mov     %rbx,144($context)      # restore context->Rbx
+       mov     %rbp,160($context)      # restore context->Rbp
+       mov     %r12,216($context)      # restore context->R12
+       mov     %r13,224($context)      # restore context->R13
+       mov     %r14,232($context)      # restore context->R14
+       mov     %r15,240($context)      # restore context->R15
+
+.Lin_key_prologue:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+       jmp     .Lcommon_seh_exit
+.size  key_se_handler,.-key_se_handler
+
+.type  cbc_se_handler,\@abi-omnipotent
+.align 16
+cbc_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       lea     .Lcbc_prologue(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_prologue
+       jb      .Lin_cbc_prologue
+
+       lea     .Lcbc_fast_body(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_fast_body
+       jb      .Lin_cbc_frame_setup
+
+       lea     .Lcbc_slow_prologue(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_slow_prologue
+       jb      .Lin_cbc_body
+
+       lea     .Lcbc_slow_body(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_slow_body
+       jb      .Lin_cbc_frame_setup
+
+.Lin_cbc_body:
+       mov     152($context),%rax      # pull context->Rsp
+
+       lea     .Lcbc_epilogue(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip>=.Lcbc_epilogue
+       jae     .Lin_cbc_prologue
+
+       lea     8(%rax),%rax
+
+       lea     .Lcbc_popfq(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip>=.Lcbc_popfq
+       jae     .Lin_cbc_prologue
+
+       mov     `16-8`(%rax),%rax       # biased $_rsp
+       lea     56(%rax),%rax
+
+.Lin_cbc_frame_setup:
+       mov     -16(%rax),%rbx
+       mov     -24(%rax),%rbp
+       mov     -32(%rax),%r12
+       mov     -40(%rax),%r13
+       mov     -48(%rax),%r14
+       mov     -56(%rax),%r15
+       mov     %rbx,144($context)      # restore context->Rbx
+       mov     %rbp,160($context)      # restore context->Rbp
+       mov     %r12,216($context)      # restore context->R12
+       mov     %r13,224($context)      # restore context->R13
+       mov     %r14,232($context)      # restore context->R14
+       mov     %r15,240($context)      # restore context->R15
+
+.Lin_cbc_prologue:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+.Lcommon_seh_exit:
+
+       mov     40($disp),%rdi          # disp->ContextRecord
+       mov     $context,%rsi           # context
+       mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
+       .long   0xa548f3fc              # cld; rep movsq
+
+       mov     $disp,%rsi
+       xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
+       mov     8(%rsi),%rdx            # arg2, disp->ImageBase
+       mov     0(%rsi),%r8             # arg3, disp->ControlPc
+       mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
+       mov     40(%rsi),%r10           # disp->ContextRecord
+       lea     56(%rsi),%r11           # &disp->HandlerData
+       lea     24(%rsi),%r12           # &disp->EstablisherFrame
+       mov     %r10,32(%rsp)           # arg5
+       mov     %r11,40(%rsp)           # arg6
+       mov     %r12,48(%rsp)           # arg7
+       mov     %rcx,56(%rsp)           # arg8, (NULL)
+       call    *__imp_RtlVirtualUnwind(%rip)
+
+       mov     \$1,%eax                # ExceptionContinueSearch
+       add     \$64,%rsp
+       popfq
+       pop     %r15
+       pop     %r14
+       pop     %r13
+       pop     %r12
+       pop     %rbp
+       pop     %rbx
+       pop     %rdi
+       pop     %rsi
+       ret
+.size  cbc_se_handler,.-cbc_se_handler
+
+.section       .pdata
+.align 4
+       .rva    .LSEH_begin_AES_encrypt
+       .rva    .LSEH_end_AES_encrypt
+       .rva    .LSEH_info_AES_encrypt
+
+       .rva    .LSEH_begin_AES_decrypt
+       .rva    .LSEH_end_AES_decrypt
+       .rva    .LSEH_info_AES_decrypt
+
+       .rva    .LSEH_begin_AES_set_encrypt_key
+       .rva    .LSEH_end_AES_set_encrypt_key
+       .rva    .LSEH_info_AES_set_encrypt_key
+
+       .rva    .LSEH_begin_AES_set_decrypt_key
+       .rva    .LSEH_end_AES_set_decrypt_key
+       .rva    .LSEH_info_AES_set_decrypt_key
+
+       .rva    .LSEH_begin_AES_cbc_encrypt
+       .rva    .LSEH_end_AES_cbc_encrypt
+       .rva    .LSEH_info_AES_cbc_encrypt
+
+.section       .xdata
+.align 8
+.LSEH_info_AES_encrypt:
+       .byte   9,0,0,0
+       .rva    block_se_handler
+       .rva    .Lenc_prologue,.Lenc_epilogue   # HandlerData[]
+.LSEH_info_AES_decrypt:
+       .byte   9,0,0,0
+       .rva    block_se_handler
+       .rva    .Ldec_prologue,.Ldec_epilogue   # HandlerData[]
+.LSEH_info_AES_set_encrypt_key:
+       .byte   9,0,0,0
+       .rva    key_se_handler
+       .rva    .Lenc_key_prologue,.Lenc_key_epilogue   # HandlerData[]
+.LSEH_info_AES_set_decrypt_key:
+       .byte   9,0,0,0
+       .rva    key_se_handler
+       .rva    .Ldec_key_prologue,.Ldec_key_epilogue   # HandlerData[]
+.LSEH_info_AES_cbc_encrypt:
+       .byte   9,0,0,0
+       .rva    cbc_se_handler
+___
+}
+
 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
 
 print $code;