chacha/asm/chacha-x86_64.pl: add CFI annotations, Win64 SEH handlers and an AVX512 code path.
diff --git a/crypto/chacha/asm/chacha-x86_64.pl b/crypto/chacha/asm/chacha-x86_64.pl
index 7153d82e121f06d6c24f77c7f892361027fb6f6a..b59d96f8da64ae3b9a62350aff7fa5f60ad0b929 100755
--- a/crypto/chacha/asm/chacha-x86_64.pl
+++ b/crypto/chacha/asm/chacha-x86_64.pl
@@ -32,6 +32,7 @@
 # Sandy Bridge 8.31/+42%       5.45/6.76       2.72
 # Ivy Bridge   6.71/+46%       5.40/6.49       2.41
 # Haswell      5.92/+43%       5.20/6.45       2.42        1.23
+# Skylake      5.87/+39%       4.70/-          2.31        1.19
 # Silvermont   12.0/+33%       7.75/7.40       7.03(iii)
 # Goldmont     10.6/+17%       5.10/-          3.28
 # Sledgehammer 7.28/+52%       -/14.2(ii)      -
@@ -66,7 +67,7 @@ if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
 }
 
 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
-          `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+          `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
        $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
        $avx += 1 if ($1==2.11 && $2>=8);
 }
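
The widened regex now also captures NASM's patch level, so the 2.11.8 special case can actually fire. A minimal standalone sketch (the banner string is hypothetical, not real `nasm -v` output) of how the two captures drive $avx:

	my $banner = "NASM version 2.11.08 compiled on Jan  1 2016";	# hypothetical banner
	if ($banner =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
		my $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);		# $1 = "2.11"
		$avx += 1 if ($1==2.11 && $2>=8);			# $2 = "08", 2.11.8+ treated like 2.12
		print "assembler AVX level: $avx\n";			# prints 3
	}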
@@ -111,6 +112,10 @@ $code.=<<___;
 .Lsigma:
 .asciz "expand 32-byte k"
 .align 64
+.Lzeroz:
+.long  0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
+.Lfourz:
+.long  4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
 .Lincz:
 .long  0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 .Lsixteen:
@@ -237,19 +242,34 @@ $code.=<<___;
 .type  ChaCha20_ctr32,\@function,5
 .align 64
 ChaCha20_ctr32:
+.cfi_startproc
        cmp     \$0,$len
        je      .Lno_data
        mov     OPENSSL_ia32cap_P+4(%rip),%r10
+___
+$code.=<<___   if ($avx>2);
+       bt      \$48,%r10               # check for AVX512F
+       jc      .LChaCha20_avx512
+___
+$code.=<<___;
        test    \$`1<<(41-32)`,%r10d
        jnz     .LChaCha20_ssse3
 
        push    %rbx
+.cfi_push      %rbx
        push    %rbp
+.cfi_push      %rbp
        push    %r12
+.cfi_push      %r12
        push    %r13
+.cfi_push      %r13
        push    %r14
+.cfi_push      %r14
        push    %r15
+.cfi_push      %r15
        sub     \$64+24,%rsp
+.cfi_adjust_cfa_offset 64+24
+.Lctr32_body:
 
        #movdqa .Lsigma(%rip),%xmm0
        movdqu  ($key),%xmm1
@@ -388,15 +408,25 @@ $code.=<<___;
        jnz     .Loop_tail
 
 .Ldone:
-       add     \$64+24,%rsp
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
+       lea     64+24+48(%rsp),%rsi
+.cfi_def_cfa   %rsi,8
+       mov     -48(%rsi),%r15
+.cfi_restore   %r15
+       mov     -40(%rsi),%r14
+.cfi_restore   %r14
+       mov     -32(%rsi),%r13
+.cfi_restore   %r13
+       mov     -24(%rsi),%r12
+.cfi_restore   %r12
+       mov     -16(%rsi),%rbp
+.cfi_restore   %rbp
+       mov     -8(%rsi),%rbx
+.cfi_restore   %rbx
+       lea     (%rsi),%rsp
+.cfi_def_cfa_register  %rsp
 .Lno_data:
        ret
+.cfi_endproc
 .size  ChaCha20_ctr32,.-ChaCha20_ctr32
 ___
 
@@ -429,13 +459,16 @@ sub SSSE3ROUND {  # critical path is 20 "SIMD ticks" per round
        &por    ($b,$t);
 }
 
-my $xframe = $win64 ? 32+32+8 : 24;
+my $xframe = $win64 ? 32+8 : 8;
 
 $code.=<<___;
 .type  ChaCha20_ssse3,\@function,5
 .align 32
 ChaCha20_ssse3:
+.cfi_startproc
 .LChaCha20_ssse3:
+       mov     %rsp,%r9                # frame pointer
+.cfi_def_cfa_register  %r9
 ___
 $code.=<<___   if ($avx);
        test    \$`1<<(43-32)`,%r10d
@@ -446,18 +479,12 @@ $code.=<<___;
        ja      .LChaCha20_4x           # but overall it won't be slower
 
 .Ldo_sse3_after_all:
-       push    %rbx
-       push    %rbp
-       push    %r12
-       push    %r13
-       push    %r14
-       push    %r15
-
        sub     \$64+$xframe,%rsp
 ___
 $code.=<<___   if ($win64);
-       movaps  %xmm6,64+32(%rsp)
-       movaps  %xmm7,64+48(%rsp)
+       movaps  %xmm6,-0x28(%r9)
+       movaps  %xmm7,-0x18(%r9)
+.Lssse3_body:
 ___
 $code.=<<___;
        movdqa  .Lsigma(%rip),$a
@@ -471,7 +498,7 @@ $code.=<<___;
        movdqa  $b,0x10(%rsp)
        movdqa  $c,0x20(%rsp)
        movdqa  $d,0x30(%rsp)
-       mov     \$10,%ebp
+       mov     \$10,$counter           # reuse $counter
        jmp     .Loop_ssse3
 
 .align 32
@@ -481,7 +508,7 @@ $code.=<<___;
        movdqa  0x10(%rsp),$b
        movdqa  0x20(%rsp),$c
        paddd   0x30(%rsp),$d
-       mov     \$10,%ebp
+       mov     \$10,$counter
        movdqa  $d,0x30(%rsp)
        jmp     .Loop_ssse3
 
@@ -499,7 +526,7 @@ ___
        &pshufd ($b,$b,0b10010011);
        &pshufd ($d,$d,0b00111001);
 
-       &dec    ("%ebp");
+       &dec    ($counter);
        &jnz    (".Loop_ssse3");
 
 $code.=<<___;
@@ -538,32 +565,29 @@ $code.=<<___;
        movdqa  $b,0x10(%rsp)
        movdqa  $c,0x20(%rsp)
        movdqa  $d,0x30(%rsp)
-       xor     %rbx,%rbx
+       xor     $counter,$counter
 
 .Loop_tail_ssse3:
-       movzb   ($inp,%rbx),%eax
-       movzb   (%rsp,%rbx),%ecx
-       lea     1(%rbx),%rbx
+       movzb   ($inp,$counter),%eax
+       movzb   (%rsp,$counter),%ecx
+       lea     1($counter),$counter
        xor     %ecx,%eax
-       mov     %al,-1($out,%rbx)
+       mov     %al,-1($out,$counter)
        dec     $len
        jnz     .Loop_tail_ssse3
 
 .Ldone_ssse3:
 ___
 $code.=<<___   if ($win64);
-       movaps  64+32(%rsp),%xmm6
-       movaps  64+48(%rsp),%xmm7
+       movaps  -0x28(%r9),%xmm6
+       movaps  -0x18(%r9),%xmm7
 ___
 $code.=<<___;
-       add     \$64+$xframe,%rsp
-       pop     %r15
-       pop     %r14
-       pop     %r13
-       pop     %r12
-       pop     %rbp
-       pop     %rbx
+       lea     (%r9),%rsp
+.cfi_def_cfa_register  %rsp
+.Lssse3_epilogue:
        ret
+.cfi_endproc
 .size  ChaCha20_ssse3,.-ChaCha20_ssse3
 ___
 }
@@ -699,13 +723,16 @@ my @x=map("\"$_\"",@xx);
        );
 }
 
-my $xframe = $win64 ? 0xa0 : 0;
+my $xframe = $win64 ? 0xa8 : 8;
 
 $code.=<<___;
 .type  ChaCha20_4x,\@function,5
 .align 32
 ChaCha20_4x:
+.cfi_startproc
 .LChaCha20_4x:
+       mov             %rsp,%r9                # frame pointer
+.cfi_def_cfa_register  %r9
        mov             %r10,%r11
 ___
 $code.=<<___   if ($avx>1);
@@ -722,8 +749,7 @@ $code.=<<___;
        je              .Ldo_sse3_after_all     # to detect Atom
 
 .Lproceed4x:
-       lea             -0x78(%rsp),%r11
-       sub             \$0x148+$xframe,%rsp
+       sub             \$0x140+$xframe,%rsp
 ___
        ################ stack layout
        # +0x00         SIMD equivalent of @x[8-12]
@@ -734,16 +760,17 @@ ___
        # ...
        # +0x140
 $code.=<<___   if ($win64);
-       movaps          %xmm6,-0x30(%r11)
-       movaps          %xmm7,-0x20(%r11)
-       movaps          %xmm8,-0x10(%r11)
-       movaps          %xmm9,0x00(%r11)
-       movaps          %xmm10,0x10(%r11)
-       movaps          %xmm11,0x20(%r11)
-       movaps          %xmm12,0x30(%r11)
-       movaps          %xmm13,0x40(%r11)
-       movaps          %xmm14,0x50(%r11)
-       movaps          %xmm15,0x60(%r11)
+       movaps          %xmm6,-0xa8(%r9)
+       movaps          %xmm7,-0x98(%r9)
+       movaps          %xmm8,-0x88(%r9)
+       movaps          %xmm9,-0x78(%r9)
+       movaps          %xmm10,-0x68(%r9)
+       movaps          %xmm11,-0x58(%r9)
+       movaps          %xmm12,-0x48(%r9)
+       movaps          %xmm13,-0x38(%r9)
+       movaps          %xmm14,-0x28(%r9)
+       movaps          %xmm15,-0x18(%r9)
+.L4x_body:
 ___
 $code.=<<___;
        movdqa          .Lsigma(%rip),$xa3      # key[0]
@@ -1132,21 +1159,23 @@ $code.=<<___;
 .Ldone4x:
 ___
 $code.=<<___   if ($win64);
-       lea             0x140+0x30(%rsp),%r11
-       movaps          -0x30(%r11),%xmm6
-       movaps          -0x20(%r11),%xmm7
-       movaps          -0x10(%r11),%xmm8
-       movaps          0x00(%r11),%xmm9
-       movaps          0x10(%r11),%xmm10
-       movaps          0x20(%r11),%xmm11
-       movaps          0x30(%r11),%xmm12
-       movaps          0x40(%r11),%xmm13
-       movaps          0x50(%r11),%xmm14
-       movaps          0x60(%r11),%xmm15
+       movaps          -0xa8(%r9),%xmm6
+       movaps          -0x98(%r9),%xmm7
+       movaps          -0x88(%r9),%xmm8
+       movaps          -0x78(%r9),%xmm9
+       movaps          -0x68(%r9),%xmm10
+       movaps          -0x58(%r9),%xmm11
+       movaps          -0x48(%r9),%xmm12
+       movaps          -0x38(%r9),%xmm13
+       movaps          -0x28(%r9),%xmm14
+       movaps          -0x18(%r9),%xmm15
 ___
 $code.=<<___;
-       add             \$0x148+$xframe,%rsp
+       lea             (%r9),%rsp
+.cfi_def_cfa_register  %rsp
+.L4x_epilogue:
        ret
+.cfi_endproc
 .size  ChaCha20_4x,.-ChaCha20_4x
 ___
 }
@@ -1227,15 +1256,17 @@ my @x=map("\"$_\"",@xx);
        );
 }
 
-my $xframe = $win64 ? 0xa0 : 0;
+my $xframe = $win64 ? 0xa8 : 8;
 
 $code.=<<___;
 .type  ChaCha20_4xop,\@function,5
 .align 32
 ChaCha20_4xop:
+.cfi_startproc
 .LChaCha20_4xop:
-       lea             -0x78(%rsp),%r11
-       sub             \$0x148+$xframe,%rsp
+       mov             %rsp,%r9                # frame pointer
+.cfi_def_cfa_register  %r9
+       sub             \$0x140+$xframe,%rsp
 ___
        ################ stack layout
        # +0x00         SIMD equivalent of @x[8-12]
@@ -1246,16 +1277,17 @@ ___
        # ...
        # +0x140
 $code.=<<___   if ($win64);
-       movaps          %xmm6,-0x30(%r11)
-       movaps          %xmm7,-0x20(%r11)
-       movaps          %xmm8,-0x10(%r11)
-       movaps          %xmm9,0x00(%r11)
-       movaps          %xmm10,0x10(%r11)
-       movaps          %xmm11,0x20(%r11)
-       movaps          %xmm12,0x30(%r11)
-       movaps          %xmm13,0x40(%r11)
-       movaps          %xmm14,0x50(%r11)
-       movaps          %xmm15,0x60(%r11)
+       movaps          %xmm6,-0xa8(%r9)
+       movaps          %xmm7,-0x98(%r9)
+       movaps          %xmm8,-0x88(%r9)
+       movaps          %xmm9,-0x78(%r9)
+       movaps          %xmm10,-0x68(%r9)
+       movaps          %xmm11,-0x58(%r9)
+       movaps          %xmm12,-0x48(%r9)
+       movaps          %xmm13,-0x38(%r9)
+       movaps          %xmm14,-0x28(%r9)
+       movaps          %xmm15,-0x18(%r9)
+.L4xop_body:
 ___
 $code.=<<___;
        vzeroupper
@@ -1583,21 +1615,23 @@ $code.=<<___;
        vzeroupper
 ___
 $code.=<<___   if ($win64);
-       lea             0x140+0x30(%rsp),%r11
-       movaps          -0x30(%r11),%xmm6
-       movaps          -0x20(%r11),%xmm7
-       movaps          -0x10(%r11),%xmm8
-       movaps          0x00(%r11),%xmm9
-       movaps          0x10(%r11),%xmm10
-       movaps          0x20(%r11),%xmm11
-       movaps          0x30(%r11),%xmm12
-       movaps          0x40(%r11),%xmm13
-       movaps          0x50(%r11),%xmm14
-       movaps          0x60(%r11),%xmm15
+       movaps          -0xa8(%r9),%xmm6
+       movaps          -0x98(%r9),%xmm7
+       movaps          -0x88(%r9),%xmm8
+       movaps          -0x78(%r9),%xmm9
+       movaps          -0x68(%r9),%xmm10
+       movaps          -0x58(%r9),%xmm11
+       movaps          -0x48(%r9),%xmm12
+       movaps          -0x38(%r9),%xmm13
+       movaps          -0x28(%r9),%xmm14
+       movaps          -0x18(%r9),%xmm15
 ___
 $code.=<<___;
-       add             \$0x148+$xframe,%rsp
+       lea             (%r9),%rsp
+.cfi_def_cfa_register  %rsp
+.L4xop_epilogue:
        ret
+.cfi_endproc
 .size  ChaCha20_4xop,.-ChaCha20_4xop
 ___
 }
@@ -1724,39 +1758,34 @@ my @x=map("\"$_\"",@xx);
        );
 }
 
-my $xframe = $win64 ? 0xb0 : 8;
+my $xframe = $win64 ? 0xa8 : 8;
 
 $code.=<<___;
 .type  ChaCha20_8x,\@function,5
 .align 32
 ChaCha20_8x:
+.cfi_startproc
 .LChaCha20_8x:
-___
-$code.=<<___           if ($avx>2);
-       test            \$`1<<16`,%r10d                 # check for AVX512F
-       jnz             .LChaCha20_16x
-___
-$code.=<<___;
-       mov             %rsp,%r10
+       mov             %rsp,%r9                # frame register
+.cfi_def_cfa_register  %r9
        sub             \$0x280+$xframe,%rsp
        and             \$-32,%rsp
 ___
 $code.=<<___   if ($win64);
-       lea             0x290+0x30(%rsp),%r11
-       movaps          %xmm6,-0x30(%r11)
-       movaps          %xmm7,-0x20(%r11)
-       movaps          %xmm8,-0x10(%r11)
-       movaps          %xmm9,0x00(%r11)
-       movaps          %xmm10,0x10(%r11)
-       movaps          %xmm11,0x20(%r11)
-       movaps          %xmm12,0x30(%r11)
-       movaps          %xmm13,0x40(%r11)
-       movaps          %xmm14,0x50(%r11)
-       movaps          %xmm15,0x60(%r11)
+       movaps          %xmm6,-0xa8(%r9)
+       movaps          %xmm7,-0x98(%r9)
+       movaps          %xmm8,-0x88(%r9)
+       movaps          %xmm9,-0x78(%r9)
+       movaps          %xmm10,-0x68(%r9)
+       movaps          %xmm11,-0x58(%r9)
+       movaps          %xmm12,-0x48(%r9)
+       movaps          %xmm13,-0x38(%r9)
+       movaps          %xmm14,-0x28(%r9)
+       movaps          %xmm15,-0x18(%r9)
+.L8x_body:
 ___
 $code.=<<___;
        vzeroupper
-       mov             %r10,0x280(%rsp)
 
        ################ stack layout
        # +0x00         SIMD equivalent of @x[8-12]
@@ -1765,7 +1794,7 @@ $code.=<<___;
        # ...
        # +0x200        SIMD counters (with nonce smashed by lanes)
        # ...
-       # +0x280        saved %rsp
+       # +0x280
 
        vbroadcasti128  .Lsigma(%rip),$xa3      # key[0]
        vbroadcasti128  ($key),$xb3             # key[1]
@@ -2228,24 +2257,26 @@ $code.=<<___;
        jnz             .Loop_tail8x
 
 .Ldone8x:
-       vzeroupper
+       vzeroall
 ___
 $code.=<<___   if ($win64);
-       lea             0x290+0x30(%rsp),%r11
-       movaps          -0x30(%r11),%xmm6
-       movaps          -0x20(%r11),%xmm7
-       movaps          -0x10(%r11),%xmm8
-       movaps          0x00(%r11),%xmm9
-       movaps          0x10(%r11),%xmm10
-       movaps          0x20(%r11),%xmm11
-       movaps          0x30(%r11),%xmm12
-       movaps          0x40(%r11),%xmm13
-       movaps          0x50(%r11),%xmm14
-       movaps          0x60(%r11),%xmm15
+       movaps          -0xa8(%r9),%xmm6
+       movaps          -0x98(%r9),%xmm7
+       movaps          -0x88(%r9),%xmm8
+       movaps          -0x78(%r9),%xmm9
+       movaps          -0x68(%r9),%xmm10
+       movaps          -0x58(%r9),%xmm11
+       movaps          -0x48(%r9),%xmm12
+       movaps          -0x38(%r9),%xmm13
+       movaps          -0x28(%r9),%xmm14
+       movaps          -0x18(%r9),%xmm15
 ___
 $code.=<<___;
-       mov             0x280(%rsp),%rsp
+       lea             (%r9),%rsp
+.cfi_def_cfa_register  %rsp
+.L8x_epilogue:
        ret
+.cfi_endproc
 .size  ChaCha20_8x,.-ChaCha20_8x
 ___
 }
@@ -2253,6 +2284,228 @@ ___
 ########################################################################
 # AVX512 code paths
 if ($avx>2) {
+# This one handles shorter inputs...
+
+my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
+my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+sub AVX512ROUND {      # critical path is 14 "SIMD ticks" per round
+       &vpaddd ($a,$a,$b);
+       &vpxord ($d,$d,$a);
+       &vprold ($d,$d,16);
+
+       &vpaddd ($c,$c,$d);
+       &vpxord ($b,$b,$c);
+       &vprold ($b,$b,12);
+
+       &vpaddd ($a,$a,$b);
+       &vpxord ($d,$d,$a);
+       &vprold ($d,$d,8);
+
+       &vpaddd ($c,$c,$d);
+       &vpxord ($b,$b,$c);
+       &vprold ($b,$b,7);
+}
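
For cross-checking, here is a scalar reference of the same quarter round in plain Perl (an illustrative sketch only, assuming 64-bit Perl; it is not used by the generator). AVX512ROUND above applies this add/xor/rotate sequence to whole rows at once, with vprold performing the 16/12/8/7-bit rotations directly instead of the pshufb or shift/or tricks the SSSE3/AVX paths need:

	sub _rotl32 { my ($v,$n) = @_; (($v << $n) | ($v >> (32-$n))) & 0xffffffff }

	sub _quarter_round {			# ChaCha quarter round, arithmetic mod 2^32
		my ($a,$b,$c,$d) = @_;
		$a = ($a+$b) & 0xffffffff;  $d = _rotl32($d^$a, 16);
		$c = ($c+$d) & 0xffffffff;  $b = _rotl32($b^$c, 12);
		$a = ($a+$b) & 0xffffffff;  $d = _rotl32($d^$a,  8);
		$c = ($c+$d) & 0xffffffff;  $b = _rotl32($b^$c,  7);
		($a,$b,$c,$d);
	}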
+
+my $xframe = $win64 ? 32+8 : 8;
+
+$code.=<<___;
+.type  ChaCha20_avx512,\@function,5
+.align 32
+ChaCha20_avx512:
+.cfi_startproc
+.LChaCha20_avx512:
+       mov     %rsp,%r9                # frame pointer
+.cfi_def_cfa_register  %r9
+       cmp     \$512,$len
+       ja      .LChaCha20_16x
+
+       sub     \$64+$xframe,%rsp
+___
+$code.=<<___   if ($win64);
+       movaps  %xmm6,-0x28(%r9)
+       movaps  %xmm7,-0x18(%r9)
+.Lavx512_body:
+___
+$code.=<<___;
+       vbroadcasti32x4 .Lsigma(%rip),$a
+       vbroadcasti32x4 ($key),$b
+       vbroadcasti32x4 16($key),$c
+       vbroadcasti32x4 ($counter),$d
+
+       vmovdqa32       $a,$a_
+       vmovdqa32       $b,$b_
+       vmovdqa32       $c,$c_
+       vpaddd          .Lzeroz(%rip),$d,$d
+       vmovdqa32       .Lfourz(%rip),$fourz
+       mov             \$10,$counter   # reuse $counter
+       vmovdqa32       $d,$d_
+       jmp             .Loop_avx512
+
+.align 16
+.Loop_outer_avx512:
+       vmovdqa32       $a_,$a
+       vmovdqa32       $b_,$b
+       vmovdqa32       $c_,$c
+       vpaddd          $fourz,$d_,$d
+       mov             \$10,$counter
+       vmovdqa32       $d,$d_
+       jmp             .Loop_avx512
+
+.align 32
+.Loop_avx512:
+___
+       &AVX512ROUND();
+       &vpshufd        ($c,$c,0b01001110);
+       &vpshufd        ($b,$b,0b00111001);
+       &vpshufd        ($d,$d,0b10010011);
+
+       &AVX512ROUND();
+       &vpshufd        ($c,$c,0b01001110);
+       &vpshufd        ($b,$b,0b10010011);
+       &vpshufd        ($d,$d,0b00111001);
+
+       &dec            ($counter);
+       &jnz            (".Loop_avx512");
+
+$code.=<<___;
+       vpaddd          $a_,$a,$a
+       vpaddd          $b_,$b,$b
+       vpaddd          $c_,$c,$c
+       vpaddd          $d_,$d,$d
+
+       sub             \$64,$len
+       jb              .Ltail64_avx512
+
+       vpxor           0x00($inp),%x#$a,$t0    # xor with input
+       vpxor           0x10($inp),%x#$b,$t1
+       vpxor           0x20($inp),%x#$c,$t2
+       vpxor           0x30($inp),%x#$d,$t3
+       lea             0x40($inp),$inp         # inp+=64
+
+       vmovdqu         $t0,0x00($out)          # write output
+       vmovdqu         $t1,0x10($out)
+       vmovdqu         $t2,0x20($out)
+       vmovdqu         $t3,0x30($out)
+       lea             0x40($out),$out         # out+=64
+
+       jz              .Ldone_avx512
+
+       vextracti32x4   \$1,$a,$t0
+       vextracti32x4   \$1,$b,$t1
+       vextracti32x4   \$1,$c,$t2
+       vextracti32x4   \$1,$d,$t3
+
+       sub             \$64,$len
+       jb              .Ltail_avx512
+
+       vpxor           0x00($inp),$t0,$t0      # xor with input
+       vpxor           0x10($inp),$t1,$t1
+       vpxor           0x20($inp),$t2,$t2
+       vpxor           0x30($inp),$t3,$t3
+       lea             0x40($inp),$inp         # inp+=64
+
+       vmovdqu         $t0,0x00($out)          # write output
+       vmovdqu         $t1,0x10($out)
+       vmovdqu         $t2,0x20($out)
+       vmovdqu         $t3,0x30($out)
+       lea             0x40($out),$out         # out+=64
+
+       jz              .Ldone_avx512
+
+       vextracti32x4   \$2,$a,$t0
+       vextracti32x4   \$2,$b,$t1
+       vextracti32x4   \$2,$c,$t2
+       vextracti32x4   \$2,$d,$t3
+
+       sub             \$64,$len
+       jb              .Ltail_avx512
+
+       vpxor           0x00($inp),$t0,$t0      # xor with input
+       vpxor           0x10($inp),$t1,$t1
+       vpxor           0x20($inp),$t2,$t2
+       vpxor           0x30($inp),$t3,$t3
+       lea             0x40($inp),$inp         # inp+=64
+
+       vmovdqu         $t0,0x00($out)          # write output
+       vmovdqu         $t1,0x10($out)
+       vmovdqu         $t2,0x20($out)
+       vmovdqu         $t3,0x30($out)
+       lea             0x40($out),$out         # out+=64
+
+       jz              .Ldone_avx512
+
+       vextracti32x4   \$3,$a,$t0
+       vextracti32x4   \$3,$b,$t1
+       vextracti32x4   \$3,$c,$t2
+       vextracti32x4   \$3,$d,$t3
+
+       sub             \$64,$len
+       jb              .Ltail_avx512
+
+       vpxor           0x00($inp),$t0,$t0      # xor with input
+       vpxor           0x10($inp),$t1,$t1
+       vpxor           0x20($inp),$t2,$t2
+       vpxor           0x30($inp),$t3,$t3
+       lea             0x40($inp),$inp         # inp+=64
+
+       vmovdqu         $t0,0x00($out)          # write output
+       vmovdqu         $t1,0x10($out)
+       vmovdqu         $t2,0x20($out)
+       vmovdqu         $t3,0x30($out)
+       lea             0x40($out),$out         # out+=64
+
+       jnz             .Loop_outer_avx512
+
+       jmp             .Ldone_avx512
+
+.align 16
+.Ltail64_avx512:
+       vmovdqa         %x#$a,0x00(%rsp)
+       vmovdqa         %x#$b,0x10(%rsp)
+       vmovdqa         %x#$c,0x20(%rsp)
+       vmovdqa         %x#$d,0x30(%rsp)
+       add             \$64,$len
+       jmp             .Loop_tail_avx512
+
+.align 16
+.Ltail_avx512:
+       vmovdqa         $t0,0x00(%rsp)
+       vmovdqa         $t1,0x10(%rsp)
+       vmovdqa         $t2,0x20(%rsp)
+       vmovdqa         $t3,0x30(%rsp)
+       add             \$64,$len
+
+.Loop_tail_avx512:
+       movzb           ($inp,$counter),%eax
+       movzb           (%rsp,$counter),%ecx
+       lea             1($counter),$counter
+       xor             %ecx,%eax
+       mov             %al,-1($out,$counter)
+       dec             $len
+       jnz             .Loop_tail_avx512
+
+       vmovdqa32       $a_,0x00(%rsp)
+
+.Ldone_avx512:
+       vzeroall
+___
+$code.=<<___   if ($win64);
+       movaps  -0x28(%r9),%xmm6
+       movaps  -0x18(%r9),%xmm7
+___
+$code.=<<___;
+       lea     (%r9),%rsp
+.cfi_def_cfa_register  %rsp
+.Lavx512_epilogue:
+       ret
+.cfi_endproc
+.size  ChaCha20_avx512,.-ChaCha20_avx512
+___
+}
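
A note on the short-input path just added: one zmm register holds four consecutive 64-byte blocks, one per 128-bit lane. .Lzeroz seeds the per-lane block counters at setup, .Lfourz ($fourz) advances all four lanes each outer iteration, and finished lanes are peeled off with vextracti32x4. A tiny sketch of the counter arithmetic, assuming (as the code does) that the 32-bit block counter occupies dword 0 of each lane:

	my $ctr   = 7;				# hypothetical starting block counter
	my @lanes = map { $ctr + $_ } 0..3;	# after "vpaddd .Lzeroz": blocks 7,8,9,10
	@lanes    = map { $_ + 4 } @lanes;	# after "vpaddd $fourz" (next outer pass): 11,12,13,14
	print "lane counters: @lanes\n";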
+if ($avx>2) {
+# This one handles longer inputs...
+
 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
@@ -2322,29 +2575,31 @@ my @x=map("\"$_\"",@xx);
        );
 }
 
-my $xframe = $win64 ? 0xb0 : 8;
+my $xframe = $win64 ? 0xa8 : 8;
 
 $code.=<<___;
 .type  ChaCha20_16x,\@function,5
 .align 32
 ChaCha20_16x:
+.cfi_startproc
 .LChaCha20_16x:
-       mov             %rsp,%r11
+       mov             %rsp,%r9                # frame register
+.cfi_def_cfa_register  %r9
        sub             \$64+$xframe,%rsp
        and             \$-64,%rsp
 ___
 $code.=<<___   if ($win64);
-       lea             0x290+0x30(%rsp),%r11
-       movaps          %xmm6,-0x30(%r11)
-       movaps          %xmm7,-0x20(%r11)
-       movaps          %xmm8,-0x10(%r11)
-       movaps          %xmm9,0x00(%r11)
-       movaps          %xmm10,0x10(%r11)
-       movaps          %xmm11,0x20(%r11)
-       movaps          %xmm12,0x30(%r11)
-       movaps          %xmm13,0x40(%r11)
-       movaps          %xmm14,0x50(%r11)
-       movaps          %xmm15,0x60(%r11)
+       movaps          %xmm6,-0xa8(%r9)
+       movaps          %xmm7,-0x98(%r9)
+       movaps          %xmm8,-0x88(%r9)
+       movaps          %xmm9,-0x78(%r9)
+       movaps          %xmm10,-0x68(%r9)
+       movaps          %xmm11,-0x58(%r9)
+       movaps          %xmm12,-0x48(%r9)
+       movaps          %xmm13,-0x38(%r9)
+       movaps          %xmm14,-0x28(%r9)
+       movaps          %xmm15,-0x18(%r9)
+.L16x_body:
 ___
 $code.=<<___;
        vzeroupper
@@ -2727,33 +2982,288 @@ $code.=<<___;
        dec             $len
        jnz             .Loop_tail16x
 
+       vpxord          $xa0,$xa0,$xa0
+       vmovdqa32       $xa0,0(%rsp)
+
 .Ldone16x:
-       vzeroupper
+       vzeroall
 ___
 $code.=<<___   if ($win64);
-       lea             0x290+0x30(%rsp),%r11
-       movaps          -0x30(%r11),%xmm6
-       movaps          -0x20(%r11),%xmm7
-       movaps          -0x10(%r11),%xmm8
-       movaps          0x00(%r11),%xmm9
-       movaps          0x10(%r11),%xmm10
-       movaps          0x20(%r11),%xmm11
-       movaps          0x30(%r11),%xmm12
-       movaps          0x40(%r11),%xmm13
-       movaps          0x50(%r11),%xmm14
-       movaps          0x60(%r11),%xmm15
+       movaps          -0xa8(%r9),%xmm6
+       movaps          -0x98(%r9),%xmm7
+       movaps          -0x88(%r9),%xmm8
+       movaps          -0x78(%r9),%xmm9
+       movaps          -0x68(%r9),%xmm10
+       movaps          -0x58(%r9),%xmm11
+       movaps          -0x48(%r9),%xmm12
+       movaps          -0x38(%r9),%xmm13
+       movaps          -0x28(%r9),%xmm14
+       movaps          -0x18(%r9),%xmm15
 ___
 $code.=<<___;
-       mov             %r11,%rsp
+       lea             (%r9),%rsp
+.cfi_def_cfa_register  %rsp
+.L16x_epilogue:
        ret
+.cfi_endproc
 .size  ChaCha20_16x,.-ChaCha20_16x
 ___
 }
 
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#              CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern        __imp_RtlVirtualUnwind
+.type  se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       lea     .Lctr32_body(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<.Lctr32_body
+       jb      .Lcommon_seh_tail
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       lea     .Lno_data(%rip),%r10    # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=.Lno_data
+       jae     .Lcommon_seh_tail
+
+       lea     64+24+48(%rax),%rax
+
+       mov     -8(%rax),%rbx
+       mov     -16(%rax),%rbp
+       mov     -24(%rax),%r12
+       mov     -32(%rax),%r13
+       mov     -40(%rax),%r14
+       mov     -48(%rax),%r15
+       mov     %rbx,144($context)      # restore context->Rbx
+       mov     %rbp,160($context)      # restore context->Rbp
+       mov     %r12,216($context)      # restore context->R12
+       mov     %r13,224($context)      # restore context->R13
+       mov     %r14,232($context)      # restore context->R14
+       mov     %r15,240($context)      # restore context->R15
+
+.Lcommon_seh_tail:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+       mov     40($disp),%rdi          # disp->ContextRecord
+       mov     $context,%rsi           # context
+       mov     \$154,%ecx              # sizeof(CONTEXT) element count
+       .long   0xa548f3fc              # cld; rep movsq
+
+       mov     $disp,%rsi
+       xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
+       mov     8(%rsi),%rdx            # arg2, disp->ImageBase
+       mov     0(%rsi),%r8             # arg3, disp->ControlPc
+       mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
+       mov     40(%rsi),%r10           # disp->ContextRecord
+       lea     56(%rsi),%r11           # &disp->HandlerData
+       lea     24(%rsi),%r12           # &disp->EstablisherFrame
+       mov     %r10,32(%rsp)           # arg5
+       mov     %r11,40(%rsp)           # arg6
+       mov     %r12,48(%rsp)           # arg7
+       mov     %rcx,56(%rsp)           # arg8, (NULL)
+       call    *__imp_RtlVirtualUnwind(%rip)
+
+       mov     \$1,%eax                # ExceptionContinueSearch
+       add     \$64,%rsp
+       popfq
+       pop     %r15
+       pop     %r14
+       pop     %r13
+       pop     %r12
+       pop     %rbp
+       pop     %rbx
+       pop     %rdi
+       pop     %rsi
+       ret
+.size  se_handler,.-se_handler
+
+.type  ssse3_handler,\@abi-omnipotent
+.align 16
+ssse3_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lcommon_seh_tail
+
+       mov     192($context),%rax      # pull context->R9
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lcommon_seh_tail
+
+       lea     -0x28(%rax),%rsi
+       lea     512($context),%rdi      # &context.Xmm6
+       mov     \$4,%ecx
+       .long   0xa548f3fc              # cld; rep movsq
+
+       jmp     .Lcommon_seh_tail
+.size  ssse3_handler,.-ssse3_handler
+
+.type  full_handler,\@abi-omnipotent
+.align 16
+full_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lcommon_seh_tail
+
+       mov     192($context),%rax      # pull context->R9
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lcommon_seh_tail
+
+       lea     -0xa8(%rax),%rsi
+       lea     512($context),%rdi      # &context.Xmm6
+       mov     \$20,%ecx
+       .long   0xa548f3fc              # cld; rep movsq
+
+       jmp     .Lcommon_seh_tail
+.size  full_handler,.-full_handler
+
+.section       .pdata
+.align 4
+       .rva    .LSEH_begin_ChaCha20_ctr32
+       .rva    .LSEH_end_ChaCha20_ctr32
+       .rva    .LSEH_info_ChaCha20_ctr32
+
+       .rva    .LSEH_begin_ChaCha20_ssse3
+       .rva    .LSEH_end_ChaCha20_ssse3
+       .rva    .LSEH_info_ChaCha20_ssse3
+
+       .rva    .LSEH_begin_ChaCha20_4x
+       .rva    .LSEH_end_ChaCha20_4x
+       .rva    .LSEH_info_ChaCha20_4x
+___
+$code.=<<___ if ($avx);
+       .rva    .LSEH_begin_ChaCha20_4xop
+       .rva    .LSEH_end_ChaCha20_4xop
+       .rva    .LSEH_info_ChaCha20_4xop
+___
+$code.=<<___ if ($avx>1);
+       .rva    .LSEH_begin_ChaCha20_8x
+       .rva    .LSEH_end_ChaCha20_8x
+       .rva    .LSEH_info_ChaCha20_8x
+___
+$code.=<<___ if ($avx>2);
+       .rva    .LSEH_begin_ChaCha20_avx512
+       .rva    .LSEH_end_ChaCha20_avx512
+       .rva    .LSEH_info_ChaCha20_avx512
+
+       .rva    .LSEH_begin_ChaCha20_16x
+       .rva    .LSEH_end_ChaCha20_16x
+       .rva    .LSEH_info_ChaCha20_16x
+___
+$code.=<<___;
+.section       .xdata
+.align 8
+.LSEH_info_ChaCha20_ctr32:
+       .byte   9,0,0,0
+       .rva    se_handler
+
+.LSEH_info_ChaCha20_ssse3:
+       .byte   9,0,0,0
+       .rva    ssse3_handler
+       .rva    .Lssse3_body,.Lssse3_epilogue
+
+.LSEH_info_ChaCha20_4x:
+       .byte   9,0,0,0
+       .rva    full_handler
+       .rva    .L4x_body,.L4x_epilogue
+___
+$code.=<<___ if ($avx);
+.LSEH_info_ChaCha20_4xop:
+       .byte   9,0,0,0
+       .rva    full_handler
+       .rva    .L4xop_body,.L4xop_epilogue             # HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_ChaCha20_8x:
+       .byte   9,0,0,0
+       .rva    full_handler
+       .rva    .L8x_body,.L8x_epilogue                 # HandlerData[]
+___
+$code.=<<___ if ($avx>2);
+.LSEH_info_ChaCha20_avx512:
+       .byte   9,0,0,0
+       .rva    ssse3_handler
+       .rva    .Lavx512_body,.Lavx512_epilogue         # HandlerData[]
+
+.LSEH_info_ChaCha20_16x:
+       .byte   9,0,0,0
+       .rva    full_handler
+       .rva    .L16x_body,.L16x_epilogue               # HandlerData[]
+___
+}
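
To make the SEH registration above easier to follow, here is a rough Perl model (the function and the read_stack helper are invented for illustration; this is not how Windows actually invokes the handler) of the decision ssse3_handler makes with the .Lssse3_body/.Lssse3_epilogue pair recorded as HandlerData[]: inside that window the frame pointer saved in R9 is used to recover %xmm6/%xmm7 and the stack pointer, outside it nothing needs undoing.

	sub read_stack { my ($addr) = @_; sprintf("<16 bytes at %#x>",$addr) }	# stand-in for a memory read

	sub model_ssse3_handler {
		my ($ctx, $body, $epilogue) = @_;	# $body/$epilogue resolved from HandlerData[]
		return if $ctx->{Rip} < $body;		# fault before the prologue finished
		return if $ctx->{Rip} >= $epilogue;	# fault after %rsp was already restored
		my $frame = $ctx->{R9};			# stashed by "mov %rsp,%r9"
		$ctx->{Xmm6} = read_stack($frame - 0x28);	# undo the movaps spills
		$ctx->{Xmm7} = read_stack($frame - 0x18);
		$ctx->{Rsp}  = $frame;			# equivalent of "lea (%r9),%rsp"
	}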
+
 foreach (split("\n",$code)) {
-       s/\`([^\`]*)\`/eval $1/geo;
+       s/\`([^\`]*)\`/eval $1/ge;
 
-       s/%x#%y/%x/go;
+       s/%x#%[yz]/%x/g;        # "down-shift"
 
        print $_,"\n";
 }
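
The "down-shift" substitution in the output loop is what lets the AVX512 code spell the 128-bit view of a zmm register as %x#$reg: after interpolation that reads e.g. "%x#%zmm0", which the regex collapses to "%xmm0". A standalone illustration (the input line is a hypothetical sample):

	my $line = "vpxor	0x00(%rsi),%x#%zmm0,%xmm4";	# as emitted with $a = "%zmm0"
	$line =~ s/%x#%[yz]/%x/g;				# "down-shift" to the xmm view
	print $line,"\n";					# vpxor	0x00(%rsi),%xmm0,%xmm4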