Non-executable stack in asm.
[openssl.git] / crypto / aes / asm / aes-x86_64.pl
index 75747ed..a757b47 100755 (executable)
@@ -7,7 +7,7 @@
 # details see http://www.openssl.org/~appro/cryptogams/.
 # ====================================================================
 #
-# Version 2.0.
+# Version 2.1.
 #
 # aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
 # Opteron 240 CPU] plus all the bells-n-whistles from 32-bit version
 #
 #              ECB encrypt     ECB decrypt     CBC large chunk
 # AMD64                33              41              13.0
-# EM64T                38              59              18.6
-# Core 2       30              43              14.5
+# EM64T                38              59              18.6(*)
+# Core 2       30              43              14.5(*)
+#
+# (*) with hyper-threading off
 
-$verticalspin=1;       # unlike 32-bit version $verticalspin performs
-                       # ~15% better on both AMD and Intel cores
-$output=shift;
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
 
 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
 die "can't locate x86_64-xlate.pl";
 
-open STDOUT,"| $^X $xlate $output";
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$verticalspin=1;       # unlike 32-bit version $verticalspin performs
+                       # ~15% better on both AMD and Intel cores
+$speed_limit=512;      # see aes-586.pl for details
 
-$code=".text\n";
+$code=".section .note.GNU-stack,\"\",\@progbits\n";
+$code.=".text\n";
 
 $s0="%eax";
 $s1="%ebx";
@@ -145,22 +154,17 @@ $code.=<<___;
        movzb   `&lo("$s0")`,$acc0
        movzb   `&lo("$s1")`,$acc1
        movzb   `&lo("$s2")`,$acc2
-       mov     2($sbox,$acc0,8),$t0
-       mov     2($sbox,$acc1,8),$t1
-       mov     2($sbox,$acc2,8),$t2
-
-       and     \$0x000000ff,$t0
-       and     \$0x000000ff,$t1
-       and     \$0x000000ff,$t2
+       movzb   2($sbox,$acc0,8),$t0
+       movzb   2($sbox,$acc1,8),$t1
+       movzb   2($sbox,$acc2,8),$t2
 
        movzb   `&lo("$s3")`,$acc0
        movzb   `&hi("$s1")`,$acc1
        movzb   `&hi("$s2")`,$acc2
-       mov     2($sbox,$acc0,8),$t3
+       movzb   2($sbox,$acc0,8),$t3
        mov     0($sbox,$acc1,8),$acc1  #$t0
        mov     0($sbox,$acc2,8),$acc2  #$t1
 
-       and     \$0x000000ff,$t3
        and     \$0x0000ff00,$acc1
        and     \$0x0000ff00,$acc2
 
@@ -594,18 +598,20 @@ AES_encrypt:
        push    %r15
 
        # allocate frame "above" key schedule
-       mov     %rsp,%rax
-       mov     %rdx,$key
-       lea     -63(%rdx),%rcx
+       mov     %rsp,%r10
+       lea     -63(%rdx),%rcx  # %rdx is key argument
        and     \$-64,%rsp
        sub     %rsp,%rcx
        neg     %rcx
        and     \$0x3c0,%rcx
        sub     %rcx,%rsp
+       sub     \$32,%rsp
 
-       push    %rax            # save real stack pointer
-       push    %rsi            # save out
+       mov     %rsi,16(%rsp)   # save out
+       mov     %r10,24(%rsp)   # save real stack pointer
+.Lenc_prologue:
 
+       mov     %rdx,$key
        mov     240($key),$rnds # load rounds
 
        mov     0(%rdi),$s0     # load input vector
@@ -615,33 +621,33 @@ AES_encrypt:
 
        shl     \$4,$rnds
        lea     ($key,$rnds),%rbp
-       push    %rbp
-       push    $key
+       mov     $key,(%rsp)     # key schedule
+       mov     %rbp,8(%rsp)    # end of key schedule
 
        # pick Te4 copy which can't "overlap" with stack frame or key schedule
-       .picmeup        $sbox
-       lea     AES_Te+2048-.($sbox),$sbox
-       lea     768-32(%rsp),%rbp
+       lea     .LAES_Te+2048(%rip),$sbox
+       lea     768(%rsp),%rbp
        sub     $sbox,%rbp
        and     \$0x300,%rbp
        lea     ($sbox,%rbp),$sbox
 
        call    _x86_64_AES_encrypt_compact
 
-       lea     16(%rsp),%rsp
-       pop     $out            # restore out
+       mov     16(%rsp),$out   # restore out
+       mov     24(%rsp),%rsi   # restore saved stack pointer
        mov     $s0,0($out)     # write output vector
        mov     $s1,4($out)
        mov     $s2,8($out)
        mov     $s3,12($out)
 
-       mov     (%rsp),%rsp
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
+       mov     (%rsi),%r15
+       mov     8(%rsi),%r14
+       mov     16(%rsi),%r13
+       mov     24(%rsi),%r12
+       mov     32(%rsi),%rbp
+       mov     40(%rsi),%rbx
+       lea     48(%rsi),%rsp
+.Lenc_epilogue:
        ret
 .size  AES_encrypt,.-AES_encrypt
 ___
@@ -1188,18 +1194,20 @@ AES_decrypt:
        push    %r15
 
        # allocate frame "above" key schedule
-       mov     %rsp,%rax
-       mov     %rdx,$key
-       lea     -63(%rdx),%rcx
+       mov     %rsp,%r10
+       lea     -63(%rdx),%rcx  # %rdx is key argument
        and     \$-64,%rsp
        sub     %rsp,%rcx
        neg     %rcx
        and     \$0x3c0,%rcx
        sub     %rcx,%rsp
+       sub     \$32,%rsp
 
-       push    %rax            # save real stack pointer
-       push    %rsi            # save out
+       mov     %rsi,16(%rsp)   # save out
+       mov     %r10,24(%rsp)   # save real stack pointer
+.Ldec_prologue:
 
+       mov     %rdx,$key
        mov     240($key),$rnds # load rounds
 
        mov     0(%rdi),$s0     # load input vector
@@ -1209,35 +1217,35 @@ AES_decrypt:
 
        shl     \$4,$rnds
        lea     ($key,$rnds),%rbp
-       push    %rbp
-       push    $key
+       mov     $key,(%rsp)     # key schedule
+       mov     %rbp,8(%rsp)    # end of key schedule
 
        # pick Td4 copy which can't "overlap" with stack frame or key schedule
-       .picmeup        $sbox
-       lea     AES_Td+2048-.($sbox),$sbox
-       lea     768-32(%rsp),%rbp
+       lea     .LAES_Td+2048(%rip),$sbox
+       lea     768(%rsp),%rbp
        sub     $sbox,%rbp
        and     \$0x300,%rbp
        lea     ($sbox,%rbp),$sbox
-       shr     \$3,%rbp                # recall "magic" constants!
+       shr     \$3,%rbp        # recall "magic" constants!
        add     %rbp,$sbox
 
        call    _x86_64_AES_decrypt_compact
 
-       lea     16(%rsp),%rsp
-       pop     $out    # restore out
-       mov     $s0,0($out)
+       mov     16(%rsp),$out   # restore out
+       mov     24(%rsp),%rsi   # restore saved stack pointer
+       mov     $s0,0($out)     # write output vector
        mov     $s1,4($out)
        mov     $s2,8($out)
        mov     $s3,12($out)
 
-       mov     (%rsp),%rsp
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
+       mov     (%rsi),%r15
+       mov     8(%rsi),%r14
+       mov     16(%rsi),%r13
+       mov     24(%rsi),%r12
+       mov     32(%rsi),%rbp
+       mov     40(%rsi),%rbx
+       lea     48(%rsi),%rsp
+.Ldec_epilogue:
        ret
 .size  AES_decrypt,.-AES_decrypt
 ___
@@ -1279,7 +1287,29 @@ $code.=<<___;
 AES_set_encrypt_key:
        push    %rbx
        push    %rbp
+       push    %r12                    # redundant, but allows sharing
+       push    %r13                    # the exception handler...
+       push    %r14
+       push    %r15
+       sub     \$8,%rsp
+.Lenc_key_prologue:
+
+       call    _x86_64_AES_set_encrypt_key
+
+       mov     8(%rsp),%r15
+       mov     16(%rsp),%r14
+       mov     24(%rsp),%r13
+       mov     32(%rsp),%r12
+       mov     40(%rsp),%rbp
+       mov     48(%rsp),%rbx
+       add     \$56,%rsp
+.Lenc_key_epilogue:
+       ret
+.size  AES_set_encrypt_key,.-AES_set_encrypt_key
 
+.type  _x86_64_AES_set_encrypt_key,\@abi-omnipotent
+.align 16
+_x86_64_AES_set_encrypt_key:
        mov     %esi,%ecx                       # %ecx=bits
        mov     %rdi,%rsi                       # %rsi=userKey
        mov     %rdx,%rdi                       # %rdi=key
@@ -1289,8 +1319,7 @@ AES_set_encrypt_key:
        test    \$-1,%rdi
        jz      .Lbadpointer
 
-       .picmeup %rbp
-       lea     AES_Te-.(%rbp),%rbp
+       lea     .LAES_Te(%rip),%rbp
        lea     2048+128(%rbp),%rbp
 
        # prefetch Te4
@@ -1313,15 +1342,12 @@ AES_set_encrypt_key:
        jmp     .Lexit
 
 .L10rounds:
-       mov     0(%rsi),%eax                    # copy first 4 dwords
-       mov     4(%rsi),%ebx
-       mov     8(%rsi),%ecx
-       mov     12(%rsi),%edx
-       mov     %eax,0(%rdi)
-       mov     %ebx,4(%rdi)
-       mov     %ecx,8(%rdi)
-       mov     %edx,12(%rdi)
+       mov     0(%rsi),%rax                    # copy first 4 dwords
+       mov     8(%rsi),%rdx
+       mov     %rax,0(%rdi)
+       mov     %rdx,8(%rdi)
 
+       shr     \$32,%rdx
        xor     %ecx,%ecx
        jmp     .L10shortcut
 .align 4
@@ -1349,19 +1375,14 @@ $code.=<<___;
        jmp     .Lexit
 
 .L12rounds:
-       mov     0(%rsi),%eax                    # copy first 6 dwords
-       mov     4(%rsi),%ebx
-       mov     8(%rsi),%ecx
-       mov     12(%rsi),%edx
-       mov     %eax,0(%rdi)
-       mov     %ebx,4(%rdi)
-       mov     %ecx,8(%rdi)
-       mov     %edx,12(%rdi)
-       mov     16(%rsi),%ecx
-       mov     20(%rsi),%edx
-       mov     %ecx,16(%rdi)
-       mov     %edx,20(%rdi)
-
+       mov     0(%rsi),%rax                    # copy first 6 dwords
+       mov     8(%rsi),%rbx
+       mov     16(%rsi),%rdx
+       mov     %rax,0(%rdi)
+       mov     %rbx,8(%rdi)
+       mov     %rdx,16(%rdi)
+
+       shr     \$32,%rdx
        xor     %ecx,%ecx
        jmp     .L12shortcut
 .align 4
@@ -1397,30 +1418,23 @@ $code.=<<___;
        jmp     .Lexit
 
 .L14rounds:            
-       mov     0(%rsi),%eax                    # copy first 8 dwords
-       mov     4(%rsi),%ebx
-       mov     8(%rsi),%ecx
-       mov     12(%rsi),%edx
-       mov     %eax,0(%rdi)
-       mov     %ebx,4(%rdi)
-       mov     %ecx,8(%rdi)
-       mov     %edx,12(%rdi)
-       mov     16(%rsi),%eax
-       mov     20(%rsi),%ebx
-       mov     24(%rsi),%ecx
-       mov     28(%rsi),%edx
-       mov     %eax,16(%rdi)
-       mov     %ebx,20(%rdi)
-       mov     %ecx,24(%rdi)
-       mov     %edx,28(%rdi)
-
+       mov     0(%rsi),%rax                    # copy first 8 dwords
+       mov     8(%rsi),%rbx
+       mov     16(%rsi),%rcx
+       mov     24(%rsi),%rdx
+       mov     %rax,0(%rdi)
+       mov     %rbx,8(%rdi)
+       mov     %rcx,16(%rdi)
+       mov     %rdx,24(%rdi)
+
+       shr     \$32,%rdx
        xor     %ecx,%ecx
        jmp     .L14shortcut
 .align 4
 .L14loop:
+               mov     0(%rdi),%eax                    # rk[0]
                mov     28(%rdi),%edx                   # rk[4]
 .L14shortcut:
-               mov     0(%rdi),%eax                    # rk[0]
 ___
                &enckey ();
 $code.=<<___;
@@ -1476,10 +1490,8 @@ $code.=<<___;
 .Lbadpointer:
        mov     \$-1,%rax
 .Lexit:
-       pop     %rbp
-       pop     %rbx
-       ret
-.size  AES_set_encrypt_key,.-AES_set_encrypt_key
+       .byte   0xf3,0xc3                       # rep ret
+.size  _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
 ___
 
 sub deckey_ref()
@@ -1543,18 +1555,19 @@ $code.=<<___;
 .type  AES_set_decrypt_key,\@function,3
 .align 16
 AES_set_decrypt_key:
-       push    %rdx                    # save key schedule
-       call    AES_set_encrypt_key
-       cmp     \$0,%eax
-       pop     %r8                     # restore key schedule
-       jne     .Labort
-
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
+       push    %rdx                    # save key schedule
+.Ldec_key_prologue:
+
+       call    _x86_64_AES_set_encrypt_key
+       mov     (%rsp),%r8              # restore key schedule
+       cmp     \$0,%eax
+       jne     .Labort
 
        mov     240(%r8),%r14d          # pull number of rounds
        xor     %rdi,%rdi
@@ -1576,8 +1589,7 @@ AES_set_decrypt_key:
                cmp     %rsi,%rdi
        jne     .Linvert
 
-       .picmeup %rax
-       lea     AES_Te+2048+1024-.(%rax),%rax   # rcon
+       lea     .LAES_Te+2048+1024(%rip),%rax   # rcon
 
        mov     40(%rax),$mask80
        mov     48(%rax),$maskfe
@@ -1601,13 +1613,15 @@ $code.=<<___;
        jnz     .Lpermute
 
        xor     %rax,%rax
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
 .Labort:
+       mov     8(%rsp),%r15
+       mov     16(%rsp),%r14
+       mov     24(%rsp),%r13
+       mov     32(%rsp),%r12
+       mov     40(%rsp),%rbp
+       mov     48(%rsp),%rbx
+       add     \$56,%rsp
+.Ldec_key_epilogue:
        ret
 .size  AES_set_decrypt_key,.-AES_set_decrypt_key
 ___
@@ -1618,46 +1632,59 @@ ___
 {
 # stack frame layout
 # -8(%rsp)             return address
-my $_rsp="0(%rsp)";            # saved %rsp
-my $_len="8(%rsp)";            # copy of 3rd parameter, length
-my $_key="16(%rsp)";           # copy of 4th parameter, key
-my $_ivp="24(%rsp)";           # copy of 5th parameter, ivp
-my $keyp="32(%rsp)";           # one to pass as $key
-my $ivec="40(%rsp)";           # ivec[16]
-my $aes_key="56(%rsp)";                # copy of aes_key
-my $mark="56+240(%rsp)";       # copy of aes_key->rounds
+my $keyp="0(%rsp)";            # one to pass as $key
+my $keyend="8(%rsp)";          # &(keyp->rd_key[4*keyp->rounds])
+my $_rsp="16(%rsp)";           # saved %rsp
+my $_inp="24(%rsp)";           # copy of 1st parameter, inp
+my $_out="32(%rsp)";           # copy of 2nd parameter, out
+my $_len="40(%rsp)";           # copy of 3rd parameter, length
+my $_key="48(%rsp)";           # copy of 4th parameter, key
+my $_ivp="56(%rsp)";           # copy of 5th parameter, ivp
+my $ivec="64(%rsp)";           # ivec[16]
+my $aes_key="80(%rsp)";                # copy of aes_key
+my $mark="80+240(%rsp)";       # copy of aes_key->rounds
 
 $code.=<<___;
 .globl AES_cbc_encrypt
 .type  AES_cbc_encrypt,\@function,6
 .align 16
+.extern        OPENSSL_ia32cap_P
 AES_cbc_encrypt:
        cmp     \$0,%rdx        # check length
-       je      .Lcbc_just_ret
+       je      .Lcbc_epilogue
+       pushfq
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
-       pushfq
-       cld
+.Lcbc_prologue:
 
-       .picmeup $sbox
-.Lcbc_pic_point:
+       cld
+       mov     %r9d,%r9d       # clear upper half of enc
 
+       lea     .LAES_Te(%rip),$sbox
        cmp     \$0,%r9
-       je      .LDECRYPT
-
-       lea     AES_Te-.Lcbc_pic_point($sbox),$sbox
+       jne     .Lcbc_picked_te
+       lea     .LAES_Td(%rip),$sbox
+.Lcbc_picked_te:
+
+       mov     OPENSSL_ia32cap_P(%rip),%r10d
+       cmp     \$$speed_limit,%rdx
+       jb      .Lcbc_slow_prologue
+       test    \$15,%rdx
+       jnz     .Lcbc_slow_prologue
+       bt      \$28,%r10d
+       jc      .Lcbc_slow_prologue
 
        # allocate aligned stack frame...
-       lea     -64-248(%rsp),$key
+       lea     -88-248(%rsp),$key
        and     \$-64,$key
 
-       # ... and make it doesn't alias with AES_Te modulo 4096
+       # ... and make sure it doesn't alias with AES_T[ed] modulo 4096
        mov     $sbox,%r10
-       lea     2048($sbox),%r11
+       lea     2304($sbox),%r11
        mov     $key,%r12
        and     \$0xFFF,%r10    # s = $sbox&0xfff
        and     \$0xFFF,%r11    # e = ($sbox+2048)&0xfff
@@ -1677,22 +1704,27 @@ AES_cbc_encrypt:
 .Lcbc_te_ok:
 
        xchg    %rsp,$key
-       add     \$8,%rsp        # reserve for return address!
+       #add    \$8,%rsp        # reserve for return address!
        mov     $key,$_rsp      # save %rsp
+.Lcbc_fast_body:
+       mov     %rdi,$_inp      # save copy of inp
+       mov     %rsi,$_out      # save copy of out
        mov     %rdx,$_len      # save copy of len
        mov     %rcx,$_key      # save copy of key
        mov     %r8,$_ivp       # save copy of ivp
        movl    \$0,$mark       # copy of aes_key->rounds = 0;
        mov     %r8,%rbp        # rearrange input arguments
+       mov     %r9,%rbx
        mov     %rsi,$out
        mov     %rdi,$inp
        mov     %rcx,$key
 
+       mov     240($key),%eax          # key->rounds
        # do we copy key schedule to stack?
        mov     $key,%r10
        sub     $sbox,%r10
        and     \$0xfff,%r10
-       cmp     \$2048,%r10
+       cmp     \$2304,%r10
        jb      .Lcbc_do_ecopy
        cmp     \$4096-248,%r10
        jb      .Lcbc_skip_ecopy
@@ -1703,12 +1735,11 @@ AES_cbc_encrypt:
                lea     $aes_key,$key
                mov     \$240/8,%ecx
                .long   0x90A548F3      # rep movsq
-               mov     (%rsi),%eax     # copy aes_key->rounds
-               mov     %eax,(%rdi)
+               mov     %eax,(%rdi)     # copy aes_key->rounds
 .Lcbc_skip_ecopy:
        mov     $key,$keyp      # save key pointer
 
-       mov     \$16,%ecx
+       mov     \$18,%ecx
 .align 4
 .Lcbc_prefetch_te:
                mov     0($sbox),%r10
@@ -1718,183 +1749,77 @@ AES_cbc_encrypt:
                lea     128($sbox),$sbox
                sub     \$1,%ecx
        jnz     .Lcbc_prefetch_te
-       sub     \$2048,$sbox
+       lea     -2304($sbox),$sbox
 
-       test    \$-16,%rdx              # check upon length
-       mov     %rdx,%r10
+       cmp     \$0,%rbx
+       je      .LFAST_DECRYPT
+
+#----------------------------- ENCRYPT -----------------------------#
        mov     0(%rbp),$s0             # load iv
        mov     4(%rbp),$s1
        mov     8(%rbp),$s2
        mov     12(%rbp),$s3
-       jz      .Lcbc_enc_tail          # short input...
 
 .align 4
-.Lcbc_enc_loop:
+.Lcbc_fast_enc_loop:
                xor     0($inp),$s0
                xor     4($inp),$s1
                xor     8($inp),$s2
                xor     12($inp),$s3
-               mov     $inp,$ivec      # if ($verticalspin) save inp
-
                mov     $keyp,$key      # restore key
+               mov     $inp,$_inp      # if ($verticalspin) save inp
+
                call    _x86_64_AES_encrypt
 
-               mov     $ivec,$inp      # if ($verticalspin) restore inp
+               mov     $_inp,$inp      # if ($verticalspin) restore inp
+               mov     $_len,%r10
                mov     $s0,0($out)
                mov     $s1,4($out)
                mov     $s2,8($out)
                mov     $s3,12($out)
 
-               mov     $_len,%r10
                lea     16($inp),$inp
                lea     16($out),$out
                sub     \$16,%r10
                test    \$-16,%r10
                mov     %r10,$_len
-       jnz     .Lcbc_enc_loop
-       test    \$15,%r10
-       jnz     .Lcbc_enc_tail
+       jnz     .Lcbc_fast_enc_loop
        mov     $_ivp,%rbp      # restore ivp
        mov     $s0,0(%rbp)     # save ivec
        mov     $s1,4(%rbp)
        mov     $s2,8(%rbp)
        mov     $s3,12(%rbp)
 
-.align 4
-.Lcbc_cleanup:
-       cmpl    \$0,$mark       # was the key schedule copied?
-       lea     $aes_key,%rdi
-       mov     $_rsp,%rsp
-       je      .Lcbc_exit
-               mov     \$240/8,%ecx
-               xor     %rax,%rax
-               .long   0x90AB48F3      # rep stosq
-.Lcbc_exit:
-       popfq
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
-.Lcbc_just_ret:
-       ret
-.align 4
-.Lcbc_enc_tail:
-       cmp     $inp,$out
-       je      .Lcbc_enc_in_place
-       mov     %r10,%rcx
-       mov     $inp,%rsi
-       mov     $out,%rdi
-       .long   0xF689A4F3              # rep movsb
-.Lcbc_enc_in_place:
-       mov     \$16,%rcx               # zero tail
-       sub     %r10,%rcx
-       xor     %rax,%rax
-       .long   0xF689AAF3              # rep stosb
-       mov     $out,$inp               # this is not a mistake!
-       movq    \$16,$_len              # len=16
-       jmp     .Lcbc_enc_loop          # one more spin...
+       jmp     .Lcbc_fast_cleanup
+
 #----------------------------- DECRYPT -----------------------------#
 .align 16
-.LDECRYPT:
-       lea     AES_Td-.Lcbc_pic_point($sbox),$sbox
-
-       # allocate aligned stack frame...
-       lea     -64-248(%rsp),$key
-       and     \$-64,$key
-
-       # ... and make it doesn't alias with AES_Td modulo 4096
-       mov     $sbox,%r10
-       lea     2304($sbox),%r11
-       mov     $key,%r12
-       and     \$0xFFF,%r10    # s = $sbox&0xfff
-       and     \$0xFFF,%r11    # e = ($sbox+2048+256)&0xfff
-       and     \$0xFFF,%r12    # p = %rsp&0xfff
-
-       cmp     %r11,%r12       # if (p=>e) %rsp =- (p-e);
-       jb      .Lcbc_td_break_out
-       sub     %r11,%r12
-       sub     %r12,$key
-       jmp     .Lcbc_td_ok
-.Lcbc_td_break_out:            # else %rsp -= (p-s)&0xfff + framesz
-       sub     %r10,%r12
-       and     \$0xFFF,%r12
-       add     \$320,%r12
-       sub     %r12,$key
-.align 4
-.Lcbc_td_ok:
-
-       xchg    %rsp,$key
-       add     \$8,%rsp        # reserve for return address!
-       mov     $key,$_rsp      # save %rsp
-       mov     %rdx,$_len      # save copy of len
-       mov     %rcx,$_key      # save copy of key
-       mov     %r8,$_ivp       # save copy of ivp
-       movl    \$0,$mark       # copy of aes_key->rounds = 0;
-       mov     %r8,%rbp        # rearrange input arguments
-       mov     %rsi,$out
-       mov     %rdi,$inp
-       mov     %rcx,$key
-
-       # do we copy key schedule to stack?
-       mov     $key,%r10
-       sub     $sbox,%r10
-       and     \$0xfff,%r10
-       cmp     \$2304,%r10
-       jb      .Lcbc_do_dcopy
-       cmp     \$4096-248,%r10
-       jb      .Lcbc_skip_dcopy
-.align 4
-.Lcbc_do_dcopy:
-               mov     $key,%rsi
-               lea     $aes_key,%rdi
-               lea     $aes_key,$key
-               mov     \$240/8,%ecx
-               .long   0x90A548F3      # rep movsq
-               mov     (%rsi),%eax     # copy aes_key->rounds
-               mov     %eax,(%rdi)
-.Lcbc_skip_dcopy:
-       mov     $key,$keyp      # save key pointer
-
-       mov     \$18,%ecx
-.align 4
-.Lcbc_prefetch_td:
-               mov     0($sbox),%r10
-               mov     32($sbox),%r11
-               mov     64($sbox),%r12
-               mov     96($sbox),%r13
-               lea     128($sbox),$sbox
-               sub     \$1,%ecx
-       jnz     .Lcbc_prefetch_td
-       sub     \$2304,$sbox
-
+.LFAST_DECRYPT:
        cmp     $inp,$out
-       je      .Lcbc_dec_in_place
+       je      .Lcbc_fast_dec_in_place
 
        mov     %rbp,$ivec
 .align 4
-.Lcbc_dec_loop:
-               mov     0($inp),$s0             # read input
+.Lcbc_fast_dec_loop:
+               mov     0($inp),$s0     # read input
                mov     4($inp),$s1
                mov     8($inp),$s2
                mov     12($inp),$s3
-               mov     $inp,8+$ivec    # if ($verticalspin) save inp
-
                mov     $keyp,$key      # restore key
+               mov     $inp,$_inp      # if ($verticalspin) save inp
+
                call    _x86_64_AES_decrypt
 
                mov     $ivec,%rbp      # load ivp
-               mov     8+$ivec,$inp    # if ($verticalspin) restore inp
+               mov     $_inp,$inp      # if ($verticalspin) restore inp
+               mov     $_len,%r10      # load len
                xor     0(%rbp),$s0     # xor iv
                xor     4(%rbp),$s1
                xor     8(%rbp),$s2
                xor     12(%rbp),$s3
                mov     $inp,%rbp       # current input, next iv
 
-               mov     $_len,%r10      # load len
                sub     \$16,%r10
-               jc      .Lcbc_dec_partial
                mov     %r10,$_len      # update len
                mov     %rbp,$ivec      # update ivp
 
@@ -1905,81 +1830,281 @@ AES_cbc_encrypt:
 
                lea     16($inp),$inp
                lea     16($out),$out
-       jnz     .Lcbc_dec_loop
-.Lcbc_dec_end:
+       jnz     .Lcbc_fast_dec_loop
        mov     $_ivp,%r12              # load user ivp
        mov     0(%rbp),%r10            # load iv
        mov     8(%rbp),%r11
        mov     %r10,0(%r12)            # copy back to user
        mov     %r11,8(%r12)
-       jmp     .Lcbc_cleanup
-
-.align 4
-.Lcbc_dec_partial:
-       mov     $s0,0+$ivec             # dump output to stack
-       mov     $s1,4+$ivec
-       mov     $s2,8+$ivec
-       mov     $s3,12+$ivec
-       mov     $out,%rdi
-       lea     $ivec,%rsi
-       mov     \$16,%rcx
-       add     %r10,%rcx               # number of bytes to copy
-       .long   0xF689A4F3              # rep movsb
-       jmp     .Lcbc_dec_end
+       jmp     .Lcbc_fast_cleanup
 
 .align 16
-.Lcbc_dec_in_place:
+.Lcbc_fast_dec_in_place:
+       mov     0(%rbp),%r10            # copy iv to stack
+       mov     8(%rbp),%r11
+       mov     %r10,0+$ivec
+       mov     %r11,8+$ivec
+.align 4
+.Lcbc_fast_dec_in_place_loop:
                mov     0($inp),$s0     # load input
                mov     4($inp),$s1
                mov     8($inp),$s2
                mov     12($inp),$s3
+               mov     $keyp,$key      # restore key
+               mov     $inp,$_inp      # if ($verticalspin) save inp
 
-               mov     $inp,$ivec      # if ($verticalspin) save inp
-               mov     $keyp,$key
                call    _x86_64_AES_decrypt
 
-               mov     $ivec,$inp      # if ($verticalspin) restore inp
-               mov     $_ivp,%rbp
-               xor     0(%rbp),$s0
-               xor     4(%rbp),$s1
-               xor     8(%rbp),$s2
-               xor     12(%rbp),$s3
+               mov     $_inp,$inp      # if ($verticalspin) restore inp
+               mov     $_len,%r10
+               xor     0+$ivec,$s0
+               xor     4+$ivec,$s1
+               xor     8+$ivec,$s2
+               xor     12+$ivec,$s3
+
+               mov     0($inp),%r11    # load input
+               mov     8($inp),%r12
+               sub     \$16,%r10
+               jz      .Lcbc_fast_dec_in_place_done
 
-               mov     0($inp),%r10    # copy input to iv
-               mov     8($inp),%r11
-               mov     %r10,0(%rbp)
-               mov     %r11,8(%rbp)
+               mov     %r11,0+$ivec    # copy input to iv
+               mov     %r12,8+$ivec
 
                mov     $s0,0($out)     # save output [zaps input]
                mov     $s1,4($out)
                mov     $s2,8($out)
                mov     $s3,12($out)
 
-               mov     $_len,%rcx
                lea     16($inp),$inp
                lea     16($out),$out
-               sub     \$16,%rcx
-               jc      .Lcbc_dec_in_place_partial
-               mov     %rcx,$_len
-       jnz     .Lcbc_dec_in_place
-       jmp     .Lcbc_cleanup
+               mov     %r10,$_len
+       jmp     .Lcbc_fast_dec_in_place_loop
+.Lcbc_fast_dec_in_place_done:
+       mov     $_ivp,%rdi
+       mov     %r11,0(%rdi)    # copy iv back to user
+       mov     %r12,8(%rdi)
+
+       mov     $s0,0($out)     # save output [zaps input]
+       mov     $s1,4($out)
+       mov     $s2,8($out)
+       mov     $s3,12($out)
 
 .align 4
-.Lcbc_dec_in_place_partial:
-       # one can argue if this is actually required
-       lea     ($out,%rcx),%rdi
-       lea     (%rbp,%rcx),%rsi
-       neg     %rcx
-       .long   0xF689A4F3      # rep movsb     # restore tail
-       jmp     .Lcbc_cleanup
+.Lcbc_fast_cleanup:
+       cmpl    \$0,$mark       # was the key schedule copied?
+       lea     $aes_key,%rdi
+       je      .Lcbc_exit
+               mov     \$240/8,%ecx
+               xor     %rax,%rax
+               .long   0x90AB48F3      # rep stosq
+
+       jmp     .Lcbc_exit
+
+#--------------------------- SLOW ROUTINE ---------------------------#
+.align 16
+.Lcbc_slow_prologue:
+       # allocate aligned stack frame...
+       lea     -88(%rsp),%rbp
+       and     \$-64,%rbp
+       # ... just "above" key schedule
+       lea     -88-63(%rcx),%r10
+       sub     %rbp,%r10
+       neg     %r10
+       and     \$0x3c0,%r10
+       sub     %r10,%rbp
+
+       xchg    %rsp,%rbp
+       #add    \$8,%rsp        # reserve for return address!
+       mov     %rbp,$_rsp      # save %rsp
+.Lcbc_slow_body:
+       #mov    %rdi,$_inp      # save copy of inp
+       #mov    %rsi,$_out      # save copy of out
+       #mov    %rdx,$_len      # save copy of len
+       #mov    %rcx,$_key      # save copy of key
+       mov     %r8,$_ivp       # save copy of ivp
+       mov     %r8,%rbp        # rearrange input arguments
+       mov     %r9,%rbx
+       mov     %rsi,$out
+       mov     %rdi,$inp
+       mov     %rcx,$key
+       mov     %rdx,%r10
+
+       mov     240($key),%eax
+       mov     $key,$keyp      # save key pointer
+       shl     \$4,%eax
+       lea     ($key,%rax),%rax
+       mov     %rax,$keyend
+
+       # pick Te4 copy which can't "overlap" with stack frame or key schedule
+       lea     2048($sbox),$sbox
+       lea     768-8(%rsp),%rax
+       sub     $sbox,%rax
+       and     \$0x300,%rax
+       lea     ($sbox,%rax),$sbox
+
+       cmp     \$0,%rbx
+       je      .LSLOW_DECRYPT
+
+#--------------------------- SLOW ENCRYPT ---------------------------#
+       test    \$-16,%r10              # check upon length
+       mov     0(%rbp),$s0             # load iv
+       mov     4(%rbp),$s1
+       mov     8(%rbp),$s2
+       mov     12(%rbp),$s3
+       jz      .Lcbc_slow_enc_tail     # short input...
+
+.align 4
+.Lcbc_slow_enc_loop:
+               xor     0($inp),$s0
+               xor     4($inp),$s1
+               xor     8($inp),$s2
+               xor     12($inp),$s3
+               mov     $keyp,$key      # restore key
+               mov     $inp,$_inp      # save inp
+               mov     $out,$_out      # save out
+               mov     %r10,$_len      # save len
+
+               call    _x86_64_AES_encrypt_compact
+
+               mov     $_inp,$inp      # restore inp
+               mov     $_out,$out      # restore out
+               mov     $_len,%r10      # restore len
+               mov     $s0,0($out)
+               mov     $s1,4($out)
+               mov     $s2,8($out)
+               mov     $s3,12($out)
+
+               lea     16($inp),$inp
+               lea     16($out),$out
+               sub     \$16,%r10
+               test    \$-16,%r10
+       jnz     .Lcbc_slow_enc_loop
+       test    \$15,%r10
+       jnz     .Lcbc_slow_enc_tail
+       mov     $_ivp,%rbp      # restore ivp
+       mov     $s0,0(%rbp)     # save ivec
+       mov     $s1,4(%rbp)
+       mov     $s2,8(%rbp)
+       mov     $s3,12(%rbp)
+
+       jmp     .Lcbc_exit
+
+.align 4
+.Lcbc_slow_enc_tail:
+       mov     %rax,%r11
+       mov     %rcx,%r12
+       mov     %r10,%rcx
+       mov     $inp,%rsi
+       mov     $out,%rdi
+       .long   0x9066A4F3              # rep movsb
+       mov     \$16,%rcx               # zero tail
+       sub     %r10,%rcx
+       xor     %rax,%rax
+       .long   0x9066AAF3              # rep stosb
+       mov     $out,$inp               # this is not a mistake!
+       mov     \$16,%r10               # len=16
+       mov     %r11,%rax
+       mov     %r12,%rcx
+       jmp     .Lcbc_slow_enc_loop     # one more spin...
+#--------------------------- SLOW DECRYPT ---------------------------#
+.align 16
+.LSLOW_DECRYPT:
+       shr     \$3,%rax
+       add     %rax,$sbox              # recall "magic" constants!
+
+       mov     0(%rbp),%r11            # copy iv to stack
+       mov     8(%rbp),%r12
+       mov     %r11,0+$ivec
+       mov     %r12,8+$ivec
+
+.align 4
+.Lcbc_slow_dec_loop:
+               mov     0($inp),$s0     # load input
+               mov     4($inp),$s1
+               mov     8($inp),$s2
+               mov     12($inp),$s3
+               mov     $keyp,$key      # restore key
+               mov     $inp,$_inp      # save inp
+               mov     $out,$_out      # save out
+               mov     %r10,$_len      # save len
+
+               call    _x86_64_AES_decrypt_compact
+
+               mov     $_inp,$inp      # restore inp
+               mov     $_out,$out      # restore out
+               mov     $_len,%r10
+               xor     0+$ivec,$s0
+               xor     4+$ivec,$s1
+               xor     8+$ivec,$s2
+               xor     12+$ivec,$s3
+
+               mov     0($inp),%r11    # load input
+               mov     8($inp),%r12
+               sub     \$16,%r10
+               jc      .Lcbc_slow_dec_partial
+               jz      .Lcbc_slow_dec_done
+
+               mov     %r11,0+$ivec    # copy input to iv
+               mov     %r12,8+$ivec
+
+               mov     $s0,0($out)     # save output [can zap input]
+               mov     $s1,4($out)
+               mov     $s2,8($out)
+               mov     $s3,12($out)
+
+               lea     16($inp),$inp
+               lea     16($out),$out
+       jmp     .Lcbc_slow_dec_loop
+.Lcbc_slow_dec_done:
+       mov     $_ivp,%rdi
+       mov     %r11,0(%rdi)            # copy iv back to user
+       mov     %r12,8(%rdi)
+
+       mov     $s0,0($out)             # save output [can zap input]
+       mov     $s1,4($out)
+       mov     $s2,8($out)
+       mov     $s3,12($out)
+
+       jmp     .Lcbc_exit
+
+.align 4
+.Lcbc_slow_dec_partial:
+       mov     $_ivp,%rdi
+       mov     %r11,0(%rdi)            # copy iv back to user
+       mov     %r12,8(%rdi)
+
+       mov     $s0,0+$ivec             # save output to stack
+       mov     $s1,4+$ivec
+       mov     $s2,8+$ivec
+       mov     $s3,12+$ivec
+
+       mov     $out,%rdi
+       lea     $ivec,%rsi
+       lea     16(%r10),%rcx
+       .long   0x9066A4F3      # rep movsb
+       jmp     .Lcbc_exit
+
+.align 16
+.Lcbc_exit:
+       mov     $_rsp,%rsi
+       mov     (%rsi),%r15
+       mov     8(%rsi),%r14
+       mov     16(%rsi),%r13
+       mov     24(%rsi),%r12
+       mov     32(%rsi),%rbp
+       mov     40(%rsi),%rbx
+       lea     48(%rsi),%rsp
+.Lcbc_popfq:
+       popfq
+.Lcbc_epilogue:
+       ret
 .size  AES_cbc_encrypt,.-AES_cbc_encrypt
 ___
 }
 
 $code.=<<___;
-.globl AES_Te
 .align 64
-AES_Te:
+.LAES_Te:
 ___
        &_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
        &_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
@@ -2186,9 +2311,8 @@ $code.=<<___;
        .long   0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
 ___
 $code.=<<___;
-.globl AES_Td
 .align 64
-AES_Td:
+.LAES_Td:
 ___
        &_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
        &_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
@@ -2400,8 +2524,285 @@ $code.=<<___;
        .long   0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
        .long   0x1b1b1b1b, 0x1b1b1b1b, 0, 0
 .asciz  "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 64
 ___
 
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#              CONTEXT *context,DISPATCHER_CONTEXT *disp)
+#
+# Emitted for Win64 flavours only: structured-exception handlers that let
+# the OS unwinder step through the hand-written AES routines, whose saved
+# registers live in non-standard frame layouts.
+if ($win64) {
+# argument registers per the Microsoft x64 calling convention
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern        __imp_RtlVirtualUnwind
+.type  block_se_handler,\@abi-omnipotent
+.align 16
+block_se_handler:
+# SEH handler shared by AES_encrypt and AES_decrypt: if Rip lies between
+# the prologue/epilogue labels passed in HandlerData[], recover the saved
+# non-volatile registers from the function's frame into *context.
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp               # scratch for RtlVirtualUnwind args 5-8
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lin_block_prologue
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lin_block_prologue
+
+       mov     24(%rax),%rax           # pull saved real stack pointer
+       lea     48(%rax),%rax           # adjust...
+
+       # six callee-saved registers sit directly below the adjusted pointer
+       mov     -8(%rax),%rbx
+       mov     -16(%rax),%rbp
+       mov     -24(%rax),%r12
+       mov     -32(%rax),%r13
+       mov     -40(%rax),%r14
+       mov     -48(%rax),%r15
+       mov     %rbx,144($context)      # restore context->Rbx
+       mov     %rbp,160($context)      # restore context->Rbp
+       mov     %r12,216($context)      # restore context->R12
+       mov     %r13,224($context)      # restore context->R13
+       mov     %r14,232($context)      # restore context->R14
+       mov     %r15,240($context)      # restore context->R15
+
+.Lin_block_prologue:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+       jmp     .Lcommon_seh_exit
+.size  block_se_handler,.-block_se_handler
+
+.type  key_se_handler,\@abi-omnipotent
+.align 16
+key_se_handler:
+# SEH handler for AES_set_encrypt_key/AES_set_decrypt_key.  Unlike
+# block_se_handler it reads no stored stack pointer: the key-schedule
+# frame is fixed-size, so Rsp+56 reaches past the six saved registers.
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp               # scratch for RtlVirtualUnwind args 5-8
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lin_key_prologue
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lin_key_prologue
+
+       lea     56(%rax),%rax
+
+       # six callee-saved registers sit directly below the adjusted pointer
+       mov     -8(%rax),%rbx
+       mov     -16(%rax),%rbp
+       mov     -24(%rax),%r12
+       mov     -32(%rax),%r13
+       mov     -40(%rax),%r14
+       mov     -48(%rax),%r15
+       mov     %rbx,144($context)      # restore context->Rbx
+       mov     %rbp,160($context)      # restore context->Rbp
+       mov     %r12,216($context)      # restore context->R12
+       mov     %r13,224($context)      # restore context->R13
+       mov     %r14,232($context)      # restore context->R14
+       mov     %r15,240($context)      # restore context->R15
+
+.Lin_key_prologue:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+       jmp     .Lcommon_seh_exit
+.size  key_se_handler,.-key_se_handler
+
+.type  cbc_se_handler,\@abi-omnipotent
+.align 16
+cbc_se_handler:
+# SEH handler for AES_cbc_encrypt.  The routine has distinct fast and
+# slow paths with different frame layouts, so Rip is classified against
+# hard-coded labels below instead of HandlerData[] prologue/epilogue RVAs.
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp               # scratch for RtlVirtualUnwind args 5-8
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       lea     .Lcbc_prologue(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_prologue
+       jb      .Lin_cbc_prologue
+
+       lea     .Lcbc_fast_body(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_fast_body
+       jb      .Lin_cbc_frame_setup
+
+       lea     .Lcbc_slow_prologue(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_slow_prologue
+       jb      .Lin_cbc_body
+
+       lea     .Lcbc_slow_body(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lcbc_slow_body
+       jb      .Lin_cbc_frame_setup
+
+.Lin_cbc_body:
+       mov     152($context),%rax      # pull context->Rsp
+
+       lea     .Lcbc_epilogue(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip>=.Lcbc_epilogue
+       jae     .Lin_cbc_prologue
+
+       lea     8(%rax),%rax
+
+       lea     .Lcbc_popfq(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip>=.Lcbc_popfq
+       jae     .Lin_cbc_prologue
+
+       mov     `16-8`(%rax),%rax       # biased $_rsp
+       lea     56(%rax),%rax
+
+.Lin_cbc_frame_setup:
+       mov     -16(%rax),%rbx
+       mov     -24(%rax),%rbp
+       mov     -32(%rax),%r12
+       mov     -40(%rax),%r13
+       mov     -48(%rax),%r14
+       mov     -56(%rax),%r15
+       mov     %rbx,144($context)      # restore context->Rbx
+       mov     %rbp,160($context)      # restore context->Rbp
+       mov     %r12,216($context)      # restore context->R12
+       mov     %r13,224($context)      # restore context->R13
+       mov     %r14,232($context)      # restore context->R14
+       mov     %r15,240($context)      # restore context->R15
+
+.Lin_cbc_prologue:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+.Lcommon_seh_exit:
+# Common tail for all three handlers: copy the (now patched) CONTEXT into
+# the dispatcher's record, virtually unwind one frame, and tell the OS to
+# keep searching for a handler.
+
+       mov     40($disp),%rdi          # disp->ContextRecord
+       mov     $context,%rsi           # context
+       mov     \$`1232/8`,%ecx         # sizeof(CONTEXT)
+       .long   0xa548f3fc              # cld; rep movsq
+
+       mov     $disp,%rsi
+       xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
+       mov     8(%rsi),%rdx            # arg2, disp->ImageBase
+       mov     0(%rsi),%r8             # arg3, disp->ControlPc
+       mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
+       mov     40(%rsi),%r10           # disp->ContextRecord
+       lea     56(%rsi),%r11           # &disp->HandlerData
+       lea     24(%rsi),%r12           # &disp->EstablisherFrame
+       mov     %r10,32(%rsp)           # arg5
+       mov     %r11,40(%rsp)           # arg6
+       mov     %r12,48(%rsp)           # arg7
+       mov     %rcx,56(%rsp)           # arg8, (NULL)
+       call    *__imp_RtlVirtualUnwind(%rip)
+
+       mov     \$1,%eax                # ExceptionContinueSearch
+       add     \$64,%rsp
+       popfq
+       pop     %r15
+       pop     %r14
+       pop     %r13
+       pop     %r12
+       pop     %rbp
+       pop     %rbx
+       pop     %rdi
+       pop     %rsi
+       ret
+.size  cbc_se_handler,.-cbc_se_handler
+
+.section       .pdata
+.align 4
+# RUNTIME_FUNCTION records: begin/end/unwind-info RVA for each public routine.
+       .rva    .LSEH_begin_AES_encrypt
+       .rva    .LSEH_end_AES_encrypt
+       .rva    .LSEH_info_AES_encrypt
+
+       .rva    .LSEH_begin_AES_decrypt
+       .rva    .LSEH_end_AES_decrypt
+       .rva    .LSEH_info_AES_decrypt
+
+       .rva    .LSEH_begin_AES_set_encrypt_key
+       .rva    .LSEH_end_AES_set_encrypt_key
+       .rva    .LSEH_info_AES_set_encrypt_key
+
+       .rva    .LSEH_begin_AES_set_decrypt_key
+       .rva    .LSEH_end_AES_set_decrypt_key
+       .rva    .LSEH_info_AES_set_decrypt_key
+
+       .rva    .LSEH_begin_AES_cbc_encrypt
+       .rva    .LSEH_end_AES_cbc_encrypt
+       .rva    .LSEH_info_AES_cbc_encrypt
+
+.section       .xdata
+.align 8
+# UNWIND_INFO blocks: leading byte 9 = version 1 | UNW_FLAG_EHANDLER<<3
+# (per the MS x64 unwind format), no unwind codes; then the handler RVA
+# and, where applicable, its HandlerData[] (prologue/epilogue labels).
+.LSEH_info_AES_encrypt:
+       .byte   9,0,0,0
+       .rva    block_se_handler
+       .rva    .Lenc_prologue,.Lenc_epilogue   # HandlerData[]
+.LSEH_info_AES_decrypt:
+       .byte   9,0,0,0
+       .rva    block_se_handler
+       .rva    .Ldec_prologue,.Ldec_epilogue   # HandlerData[]
+.LSEH_info_AES_set_encrypt_key:
+       .byte   9,0,0,0
+       .rva    key_se_handler
+       .rva    .Lenc_key_prologue,.Lenc_key_epilogue   # HandlerData[]
+.LSEH_info_AES_set_decrypt_key:
+       .byte   9,0,0,0
+       .rva    key_se_handler
+       .rva    .Ldec_key_prologue,.Ldec_key_epilogue   # HandlerData[]
+.LSEH_info_AES_cbc_encrypt:
+       .byte   9,0,0,0
+       .rva    cbc_se_handler
+       # no HandlerData[]: cbc_se_handler uses hard-coded label references
+___
+}
+
 # Post-process: each `...` sequence in the generated text is a constant
 # perl expression (e.g. offset arithmetic) evaluated at script run time.
 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
 
 # Emit the finished assembly (STDOUT is piped through x86_64-xlate.pl).
 print $code;