bn/asm/x86_64-mont.pl: fix for CVE-2016-7055 (Low severity).
index 5d79b35e1cf2145e9f7b5f519f9a7e059a93d672..df4cca5bfebf0cc82a35c5951d3d379d36b11bdb 100755 (executable)
@@ -1,4 +1,11 @@
-#!/usr/bin/env perl
+#! /usr/bin/env perl
+# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
 
 # ====================================================================
 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
 # to *initial* version of this module from 2005 is ~0%/30%/40%/45%
 # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
 
+# June 2013.
+#
+# Optimize reduction in the squaring procedure and improve 1024+-bit RSA
+# sign performance by 10-16% on Intel Sandy Bridge and later
+# (virtually the same on non-Intel processors).
+
+# August 2013.
+#
+# Add MULX/ADOX/ADCX code path.
+
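
The MULX/ADOX/ADCX path exploits the fact that MULX writes a full 64x64->128-bit product without touching FLAGS, so ADCX (which only uses CF) and ADOX (which only uses OF) can keep two independent carry chains in flight at once. Below is a minimal sketch of one such multiply-accumulate step using compiler intrinsics; it assumes a GCC/Clang-style toolchain built with -mbmi2 -madx, and whether the compiler actually emits ADCX/ADOX is up to it.

    #include <immintrin.h>

    /* One column step of a multi-word multiply: add the low half of a*b into
     * *acc_lo on one carry chain and the high half into *acc_hi on another. */
    static void mac_step(unsigned long long a, unsigned long long b,
                         unsigned long long *acc_lo, unsigned long long *acc_hi,
                         unsigned char *cf_lo, unsigned char *cf_hi)
    {
        unsigned long long hi;
        unsigned long long lo = _mulx_u64(a, b, &hi);          /* MULX: FLAGS untouched */
        *cf_lo = _addcarryx_u64(*cf_lo, *acc_lo, lo, acc_lo);  /* CF chain (ADCX) */
        *cf_hi = _addcarryx_u64(*cf_hi, *acc_hi, hi, acc_hi);  /* OF chain (ADOX) */
    }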
 $flavour = shift;
 $output  = shift;
 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
@@ -40,7 +57,28 @@ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
 die "can't locate x86_64-xlate.pl";
 
-open STDOUT,"| $^X $xlate $flavour $output";
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+               =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+       $addx = ($1>=2.23);
+}
+
+if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+           `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+       $addx = ($1>=2.10);
+}
+
+if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+           `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+       $addx = ($1>=12);
+}
+
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
+       my $ver = $2 + $3/100.0;        # 3.1->3.01, 3.10->3.10
+       $addx = ($ver>=3.03);
+}
 
 # int bn_mul_mont(
 $rp="%rdi";    # BN_ULONG *rp,
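
For reference, the register comments here map the SysV argument registers onto the C prototype, which elsewhere in the tree reads roughly: int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num). The sketch below is a hedged, non-performance reference for what every code path in this file computes, namely rp[] = ap[]*bp[]*R^-1 mod np[] with R = 2^(64*num) and n0 = -np[0]^-1 mod 2^64, written in word-by-word (CIOS) form and assuming 64-bit BN_ULONG and ap, bp < np.

    #include <stdint.h>

    typedef uint64_t BN_ULONG;
    typedef unsigned __int128 u128;

    static void mont_mul_ref(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                             const BN_ULONG *np, BN_ULONG n0, int num)
    {
        BN_ULONG t[num + 2];                        /* cf. tp[num+1] in the assembly */
        for (int i = 0; i < num + 2; i++) t[i] = 0;

        for (int i = 0; i < num; i++) {
            u128 c = 0;
            for (int j = 0; j < num; j++) {         /* t += ap[] * bp[i] */
                c += (u128)ap[j] * bp[i] + t[j];
                t[j] = (BN_ULONG)c; c >>= 64;
            }
            c += t[num]; t[num] = (BN_ULONG)c; t[num + 1] = (BN_ULONG)(c >> 64);

            BN_ULONG m = t[0] * n0;                 /* Montgomery quotient digit */
            c = ((u128)np[0] * m + t[0]) >> 64;     /* low word cancels to zero */
            for (int j = 1; j < num; j++) {         /* t = (t + m*np[]) / 2^64 */
                c += (u128)np[j] * m + t[j];
                t[j - 1] = (BN_ULONG)c; c >>= 64;
            }
            c += t[num]; t[num - 1] = (BN_ULONG)c;
            t[num] = t[num + 1] + (BN_ULONG)(c >> 64);
        }

        /* Final reduction: subtract np once if t >= np.  Not constant-time here;
         * the assembly does this step branch-free. */
        int ge = (t[num] != 0);
        for (int i = num - 1; i >= 0 && !ge; i--) {
            if (t[i] != np[i]) { ge = (t[i] > np[i]); break; }
            if (i == 0) ge = 1;                     /* t == np */
        }
        BN_ULONG borrow = 0;
        for (int i = 0; i < num; i++) {
            BN_ULONG n = ge ? np[i] : 0, d = t[i] - n - borrow;
            borrow = (t[i] < n) || (t[i] == n && borrow);
            rp[i] = d;
        }
    }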
@@ -60,17 +98,28 @@ $m1="%rbp";
 $code=<<___;
 .text
 
+.extern        OPENSSL_ia32cap_P
+
 .globl bn_mul_mont
 .type  bn_mul_mont,\@function,6
 .align 16
 bn_mul_mont:
+       mov     ${num}d,${num}d
+       mov     %rsp,%rax
        test    \$3,${num}d
        jnz     .Lmul_enter
        cmp     \$8,${num}d
        jb      .Lmul_enter
+___
+$code.=<<___ if ($addx);
+       mov     OPENSSL_ia32cap_P+8(%rip),%r11d
+___
+$code.=<<___;
        cmp     $ap,$bp
        jne     .Lmul4x_enter
-       jmp     .Lsqr4x_enter
+       test    \$7,${num}d
+       jz      .Lsqr8x_enter
+       jmp     .Lmul4x_enter
 
 .align 16
 .Lmul_enter:
@@ -81,14 +130,37 @@ bn_mul_mont:
        push    %r14
        push    %r15
 
-       mov     ${num}d,${num}d
-       lea     2($num),%r10
+       neg     $num
        mov     %rsp,%r11
-       neg     %r10
-       lea     (%rsp,%r10,8),%rsp      # tp=alloca(8*(num+2))
-       and     \$-1024,%rsp            # minimize TLB usage
+       lea     -16(%rsp,$num,8),%r10   # future alloca(8*(num+2))
+       neg     $num                    # restore $num
+       and     \$-1024,%r10            # minimize TLB usage
+
+       # An OS-agnostic version of __chkstk.
+       #
+       # Some OSes (Windows) insist that the stack be "wired" to
+       # physical memory in strictly sequential order, i.e. if a stack
+       # allocation spans two pages, then a reference to the farther one
+       # can be punished with a SEGV. But page walking does good even on
+       # other OSes, because it guarantees that a runaway thread hits
+       # the guard page before it can damage an innocent one...
+       sub     %r10,%r11
+       and     \$-4096,%r11
+       lea     (%r10,%r11),%rsp
+       mov     (%rsp),%r11
+       cmp     %r10,%rsp
+       ja      .Lmul_page_walk
+       jmp     .Lmul_page_walk_done
 
-       mov     %r11,8(%rsp,$num,8)     # tp[num+1]=%rsp
+.align 16
+.Lmul_page_walk:
+       lea     -4096(%rsp),%rsp
+       mov     (%rsp),%r11
+       cmp     %r10,%rsp
+       ja      .Lmul_page_walk
+.Lmul_page_walk_done:
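
A hedged portable sketch of the page walk above: before the newly carved-out region below the old stack pointer is used, it is touched one 4096-byte page at a time in strictly descending order, so the OS guard page is always the next page reached.

    #include <stddef.h>

    /* Touch each page of a downward stack extension of `bytes` bytes starting
     * just below `old_sp`, in descending order, so guard pages (and
     * Windows-style stack probing) are never skipped over. */
    static void page_walk(volatile unsigned char *old_sp, size_t bytes)
    {
        for (size_t off = 4096; off <= bytes; off += 4096)
            (void)old_sp[-(ptrdiff_t)off];      /* one load per page, cf. "mov (%rsp),%r11" */
        (void)old_sp[-(ptrdiff_t)bytes];        /* and the final, lowest page */
    }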
+
+       mov     %rax,8(%rsp,$num,8)     # tp[num+1]=%rsp
 .Lmul_body:
        mov     $bp,%r12                # reassign $bp
 ___
@@ -226,7 +298,7 @@ $code.=<<___;
 
        lea     1($i),$i                # i++
        cmp     $num,$i
-       jl      .Louter
+       jb      .Louter
 
        xor     $i,$i                   # i=0 and clear CF!
        mov     (%rsp),%rax             # tp[0]
@@ -260,13 +332,13 @@ $code.=<<___;
 
        mov     8(%rsp,$num,8),%rsi     # restore %rsp
        mov     \$1,%rax
-       mov     (%rsi),%r15
-       mov     8(%rsi),%r14
-       mov     16(%rsi),%r13
-       mov     24(%rsi),%r12
-       mov     32(%rsi),%rbp
-       mov     40(%rsi),%rbx
-       lea     48(%rsi),%rsp
+       mov     -48(%rsi),%r15
+       mov     -40(%rsi),%r14
+       mov     -32(%rsi),%r13
+       mov     -24(%rsi),%r12
+       mov     -16(%rsi),%rbp
+       mov     -8(%rsi),%rbx
+       lea     (%rsi),%rsp
 .Lmul_epilogue:
        ret
 .size  bn_mul_mont,.-bn_mul_mont
@@ -278,7 +350,16 @@ $code.=<<___;
 .type  bn_mul4x_mont,\@function,6
 .align 16
 bn_mul4x_mont:
+       mov     ${num}d,${num}d
+       mov     %rsp,%rax
 .Lmul4x_enter:
+___
+$code.=<<___ if ($addx);
+       and     \$0x80100,%r11d
+       cmp     \$0x80100,%r11d
+       je      .Lmulx4x_enter
+___
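
The 0x80100 mask tested above corresponds to CPUID leaf 7 EBX bit 8 (BMI2, which provides MULX) and bit 19 (ADX, which provides ADCX/ADOX); the assembly consults the cached OPENSSL_ia32cap_P word rather than issuing CPUID on every call. A hedged stand-alone equivalent, assuming a GCC/Clang toolchain that supplies <cpuid.h>:

    #include <cpuid.h>

    /* Nonzero when both MULX (BMI2, leaf 7 EBX bit 8) and
     * ADCX/ADOX (ADX, leaf 7 EBX bit 19) are available. */
    static int have_mulx_adx(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            return 0;
        return (ebx & 0x80100) == 0x80100;      /* same mask as the assembly */
    }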
+$code.=<<___;
        push    %rbx
        push    %rbp
        push    %r12
@@ -286,14 +367,28 @@ bn_mul4x_mont:
        push    %r14
        push    %r15
 
-       mov     ${num}d,${num}d
-       lea     4($num),%r10
+       neg     $num
        mov     %rsp,%r11
-       neg     %r10
-       lea     (%rsp,%r10,8),%rsp      # tp=alloca(8*(num+4))
-       and     \$-1024,%rsp            # minimize TLB usage
-
-       mov     %r11,8(%rsp,$num,8)     # tp[num+1]=%rsp
+       lea     -32(%rsp,$num,8),%r10   # future alloca(8*(num+4))
+       neg     $num                    # restore
+       and     \$-1024,%r10            # minimize TLB usage
+
+       sub     %r10,%r11
+       and     \$-4096,%r11
+       lea     (%r10,%r11),%rsp
+       mov     (%rsp),%r11
+       cmp     %r10,%rsp
+       ja      .Lmul4x_page_walk
+       jmp     .Lmul4x_page_walk_done
+
+.Lmul4x_page_walk:
+       lea     -4096(%rsp),%rsp
+       mov     (%rsp),%r11
+       cmp     %r10,%rsp
+       ja      .Lmul4x_page_walk
+.Lmul4x_page_walk_done:
+
+       mov     %rax,8(%rsp,$num,8)     # tp[num+1]=%rsp
 .Lmul4x_body:
        mov     $rp,16(%rsp,$num,8)     # tp[num+2]=$rp
        mov     %rdx,%r12               # reassign $bp
@@ -400,7 +495,7 @@ $code.=<<___;
        mov     $N[1],-32(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]
        cmp     $num,$j
-       jl      .L1st4x
+       jb      .L1st4x
 
        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$A[0]
@@ -548,7 +643,7 @@ $code.=<<___;
        mov     $N[1],-32(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]
        cmp     $num,$j
-       jl      .Linner4x
+       jb      .Linner4x
 
        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[0]
@@ -594,7 +689,7 @@ $code.=<<___;
        mov     $N[1],(%rsp,$j,8)       # store upmost overflow bit
 
        cmp     $num,$i
-       jl      .Louter4x
+       jb      .Louter4x
 ___
 {
 my @ri=("%rax","%rdx",$m0,$m1);
@@ -673,13 +768,13 @@ ___
 $code.=<<___;
        mov     8(%rsp,$num,8),%rsi     # restore %rsp
        mov     \$1,%rax
-       mov     (%rsi),%r15
-       mov     8(%rsi),%r14
-       mov     16(%rsi),%r13
-       mov     24(%rsi),%r12
-       mov     32(%rsi),%rbp
-       mov     40(%rsi),%rbx
-       lea     48(%rsi),%rsp
+       mov     -48(%rsi),%r15
+       mov     -40(%rsi),%r14
+       mov     -32(%rsi),%r13
+       mov     -24(%rsi),%r12
+       mov     -16(%rsi),%rbp
+       mov     -8(%rsi),%rbx
+       lea     (%rsi),%rsp
 .Lmul4x_epilogue:
        ret
 .size  bn_mul4x_mont,.-bn_mul4x_mont
@@ -687,813 +782,548 @@ ___
 }}}
 \f{{{
 ######################################################################
-# void bn_sqr4x_mont(
+# void bn_sqr8x_mont(
 my $rptr="%rdi";       # const BN_ULONG *rptr,
 my $aptr="%rsi";       # const BN_ULONG *aptr,
 my $bptr="%rdx";       # not used
 my $nptr="%rcx";       # const BN_ULONG *nptr,
 my $n0  ="%r8";                # const BN_ULONG *n0);
-my $num ="%r9";                # int num, has to be divisible by 4 and
-                       # not less than 8
+my $num ="%r9";                # int num, has to be divisible by 8
 
 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
 my @A0=("%r10","%r11");
 my @A1=("%r12","%r13");
 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
 
+$code.=<<___   if ($addx);
+.extern        bn_sqrx8x_internal              # see x86_64-mont5 module
+___
 $code.=<<___;
-.type  bn_sqr4x_mont,\@function,6
-.align 16
-bn_sqr4x_mont:
-.Lsqr4x_enter:
+.extern        bn_sqr8x_internal               # see x86_64-mont5 module
+
+.type  bn_sqr8x_mont,\@function,6
+.align 32
+bn_sqr8x_mont:
+       mov     %rsp,%rax
+.Lsqr8x_enter:
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
+.Lsqr8x_prologue:
 
+       mov     ${num}d,%r10d
        shl     \$3,${num}d             # convert $num to bytes
-       xor     %r10,%r10
-       mov     %rsp,%r11               # put aside %rsp
-       sub     $num,%r10               # -$num
-       mov     ($n0),$n0               # *n0
-       lea     -72(%rsp,%r10,2),%rsp   # alloca(frame+2*$num)
-       and     \$-1024,%rsp            # minimize TLB usage
-       ##############################################################
-       # Stack layout
-       #
-       # +0    saved $num, used in reduction section
-       # +8    &t[2*$num], used in reduction section
-       # +32   saved $rptr
-       # +40   saved $nptr
-       # +48   saved *n0
-       # +56   saved %rsp
-       # +64   t[2*$num]
-       #
-       mov     $rptr,32(%rsp)          # save $rptr
-       mov     $nptr,40(%rsp)
-       mov     $n0,  48(%rsp)
-       mov     %r11, 56(%rsp)          # save original %rsp
-.Lsqr4x_body:
+       shl     \$3+2,%r10              # 4*$num
+       neg     $num
+
        ##############################################################
-       # Squaring part:
-       #
-       # a) multiply-n-add everything but a[i]*a[i];
-       # b) shift result of a) by 1 to the left and accumulate
-       #    a[i]*a[i] products;
+       # Ensure that the stack frame doesn't alias with $aptr modulo
+       # 4096. This is done to allow the memory disambiguation logic
+       # to do its job.
        #
-       lea     32(%r10),$i             # $i=-($num-32)
-       lea     ($aptr,$num),$aptr      # end of a[] buffer, ($aptr,$i)=&ap[2]
-
-       mov     $num,$j                 # $j=$num
-
-                                       # comments apply to $num==8 case
-       mov     -32($aptr,$i),$a0       # a[0]
-       lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
-       mov     -24($aptr,$i),%rax      # a[1]
-       lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
-       mov     -16($aptr,$i),$ai       # a[2]
-       mov     %rax,$a1
-
-       mul     $a0                     # a[1]*a[0]
-       mov     %rax,$A0[0]             # a[1]*a[0]
-        mov    $ai,%rax                # a[2]
-       mov     %rdx,$A0[1]
-       mov     $A0[0],-24($tptr,$i)    # t[1]
-
-       xor     $A0[0],$A0[0]
-       mul     $a0                     # a[2]*a[0]
-       add     %rax,$A0[1]
-        mov    $ai,%rax
-       adc     %rdx,$A0[0]
-       mov     $A0[1],-16($tptr,$i)    # t[2]
-
-       lea     -16($i),$j              # j=-16
-
-
-        mov    8($aptr,$j),$ai         # a[3]
-       mul     $a1                     # a[2]*a[1]
-       mov     %rax,$A1[0]             # a[2]*a[1]+t[3]
-        mov    $ai,%rax
-       mov     %rdx,$A1[1]
-
-       xor     $A0[1],$A0[1]
-       add     $A1[0],$A0[0]
-        lea    16($j),$j
-       adc     \$0,$A0[1]
-       mul     $a0                     # a[3]*a[0]
-       add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
-        mov    $ai,%rax
-       adc     %rdx,$A0[1]
-       mov     $A0[0],-8($tptr,$j)     # t[3]
-       jmp     .Lsqr4x_1st
+       lea     -64(%rsp,$num,2),%r11
+       mov     %rsp,%rbp
+       mov     ($n0),$n0               # *n0
+       sub     $aptr,%r11
+       and     \$4095,%r11
+       cmp     %r11,%r10
+       jb      .Lsqr8x_sp_alt
+       sub     %r11,%rbp               # align with $aptr
+       lea     -64(%rbp,$num,2),%rbp   # future alloca(frame+2*$num)
+       jmp     .Lsqr8x_sp_done
+
+.align 32
+.Lsqr8x_sp_alt:
+       lea     4096-64(,$num,2),%r10   # 4096-frame-2*$num
+       lea     -64(%rbp,$num,2),%rbp   # future alloca(frame+2*$num)
+       sub     %r10,%r11
+       mov     \$0,%r10
+       cmovc   %r10,%r11
+       sub     %r11,%rbp
+.Lsqr8x_sp_done:
+       and     \$-64,%rbp
+       mov     %rsp,%r11
+       sub     %rbp,%r11
+       and     \$-4096,%r11
+       lea     (%rbp,%r11),%rsp
+       mov     (%rsp),%r10
+       cmp     %rbp,%rsp
+       ja      .Lsqr8x_page_walk
+       jmp     .Lsqr8x_page_walk_done
 
 .align 16
-.Lsqr4x_1st:
-        mov    ($aptr,$j),$ai          # a[4]
-       xor     $A1[0],$A1[0]
-       mul     $a1                     # a[3]*a[1]
-       add     %rax,$A1[1]             # a[3]*a[1]+t[4]
-        mov    $ai,%rax
-       adc     %rdx,$A1[0]
-
-       xor     $A0[0],$A0[0]
-       add     $A1[1],$A0[1]
-       adc     \$0,$A0[0]
-       mul     $a0                     # a[4]*a[0]
-       add     %rax,$A0[1]             # a[4]*a[0]+a[3]*a[1]+t[4]
-        mov    $ai,%rax                # a[3]
-       adc     %rdx,$A0[0]
-       mov     $A0[1],($tptr,$j)       # t[4]
-
-
-        mov    8($aptr,$j),$ai         # a[5]
-       xor     $A1[1],$A1[1]
-       mul     $a1                     # a[4]*a[3]
-       add     %rax,$A1[0]             # a[4]*a[3]+t[5]
-        mov    $ai,%rax
-       adc     %rdx,$A1[1]
-
-       xor     $A0[1],$A0[1]
-       add     $A1[0],$A0[0]
-       adc     \$0,$A0[1]
-       mul     $a0                     # a[5]*a[2]
-       add     %rax,$A0[0]             # a[5]*a[2]+a[4]*a[3]+t[5]
-        mov    $ai,%rax
-       adc     %rdx,$A0[1]
-       mov     $A0[0],8($tptr,$j)      # t[5]
-
-        mov    16($aptr,$j),$ai        # a[6]
-       xor     $A1[0],$A1[0]
-       mul     $a1                     # a[5]*a[3]
-       add     %rax,$A1[1]             # a[5]*a[3]+t[6]
-        mov    $ai,%rax
-       adc     %rdx,$A1[0]
-
-       xor     $A0[0],$A0[0]
-       add     $A1[1],$A0[1]
-       adc     \$0,$A0[0]
-       mul     $a0                     # a[6]*a[2]
-       add     %rax,$A0[1]             # a[6]*a[2]+a[5]*a[3]+t[6]
-        mov    $ai,%rax                # a[3]
-       adc     %rdx,$A0[0]
-       mov     $A0[1],16($tptr,$j)     # t[6]
-
-
-        mov    24($aptr,$j),$ai        # a[7]
-       xor     $A1[1],$A1[1]
-       mul     $a1                     # a[6]*a[5]
-       add     %rax,$A1[0]             # a[6]*a[5]+t[7]
-        mov    $ai,%rax
-       adc     %rdx,$A1[1]
-
-       xor     $A0[1],$A0[1]
-       add     $A1[0],$A0[0]
-        lea    32($j),$j
-       adc     \$0,$A0[1]
-       mul     $a0                     # a[7]*a[4]
-       add     %rax,$A0[0]             # a[7]*a[4]+a[6]*a[5]+t[6]
-        mov    $ai,%rax
-       adc     %rdx,$A0[1]
-       mov     $A0[0],-8($tptr,$j)     # t[7]
-
-       cmp     \$0,$j
-       jne     .Lsqr4x_1st
-
-       xor     $A1[0],$A1[0]
-       add     $A0[1],$A1[1]
-       adc     \$0,$A1[0]
-       mul     $a1                     # a[7]*a[5]
-       add     %rax,$A1[1]
-       adc     %rdx,$A1[0]
-
-       mov     $A1[1],($tptr)          # t[8]
-       lea     16($i),$i
-       mov     $A1[0],8($tptr)         # t[9]
-       jmp     .Lsqr4x_outer
+.Lsqr8x_page_walk:
+       lea     -4096(%rsp),%rsp
+       mov     (%rsp),%r10
+       cmp     %rbp,%rsp
+       ja      .Lsqr8x_page_walk
+.Lsqr8x_page_walk_done:
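
A simplified, hedged sketch of the frame-base choice made by the .Lsqr8x_sp_alt/.Lsqr8x_sp_done logic above: the bottom of the temporary frame is slid down so that it shares its offset within a 4K page with $aptr (the alternate path clamps the slide when the residue is larger than the window), then rounded down to 64 bytes. Only the main path is modelled here and the page walk itself is omitted.

    #include <stdint.h>
    #include <stddef.h>

    /* num_bytes = 8*num; the frame holds tp[2*num] plus 64 bytes of slack. */
    static uintptr_t choose_sqr8x_frame(uintptr_t sp, uintptr_t aptr, size_t num_bytes)
    {
        uintptr_t bottom = sp - 2 * num_bytes - 64;
        uintptr_t delta  = (bottom - aptr) & 4095;  /* distance mod 4K */

        if (delta <= 4 * num_bytes)     /* main path: enough room to slide */
            bottom -= delta;            /* now bottom == aptr (mod 4096)   */
        /* else: the .Lsqr8x_sp_alt path slides by a smaller, clamped amount */

        return bottom & ~(uintptr_t)63; /* cf. "and $-64,%rbp" */
    }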
 
-.align 16
-.Lsqr4x_outer:                         # comments apply to $num==6 case
-       mov     -32($aptr,$i),$a0       # a[0]
-       lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
-       mov     -24($aptr,$i),%rax      # a[1]
-       lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
-       mov     -16($aptr,$i),$ai       # a[2]
-       mov     %rax,$a1
-
-       mov     -24($tptr,$i),$A0[0]    # t[1]
-       xor     $A0[1],$A0[1]
-       mul     $a0                     # a[1]*a[0]
-       add     %rax,$A0[0]             # a[1]*a[0]+t[1]
-        mov    $ai,%rax                # a[2]
-       adc     %rdx,$A0[1]
-       mov     $A0[0],-24($tptr,$i)    # t[1]
-
-       xor     $A0[0],$A0[0]
-       add     -16($tptr,$i),$A0[1]    # a[2]*a[0]+t[2]
-       adc     \$0,$A0[0]
-       mul     $a0                     # a[2]*a[0]
-       add     %rax,$A0[1]
-        mov    $ai,%rax
-       adc     %rdx,$A0[0]
-       mov     $A0[1],-16($tptr,$i)    # t[2]
-
-       lea     -16($i),$j              # j=-16
-       xor     $A1[0],$A1[0]
-
-
-        mov    8($aptr,$j),$ai         # a[3]
-       xor     $A1[1],$A1[1]
-       add     8($tptr,$j),$A1[0]
-       adc     \$0,$A1[1]
-       mul     $a1                     # a[2]*a[1]
-       add     %rax,$A1[0]             # a[2]*a[1]+t[3]
-        mov    $ai,%rax
-       adc     %rdx,$A1[1]
-
-       xor     $A0[1],$A0[1]
-       add     $A1[0],$A0[0]
-       adc     \$0,$A0[1]
-       mul     $a0                     # a[3]*a[0]
-       add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
-        mov    $ai,%rax
-       adc     %rdx,$A0[1]
-       mov     $A0[0],8($tptr,$j)      # t[3]
-
-       lea     16($j),$j
-       jmp     .Lsqr4x_inner
+       mov     $num,%r10
+       neg     $num
 
-.align 16
-.Lsqr4x_inner:
-        mov    ($aptr,$j),$ai          # a[4]
-       xor     $A1[0],$A1[0]
-       add     ($tptr,$j),$A1[1]
-       adc     \$0,$A1[0]
-       mul     $a1                     # a[3]*a[1]
-       add     %rax,$A1[1]             # a[3]*a[1]+t[4]
-        mov    $ai,%rax
-       adc     %rdx,$A1[0]
-
-       xor     $A0[0],$A0[0]
-       add     $A1[1],$A0[1]
-       adc     \$0,$A0[0]
-       mul     $a0                     # a[4]*a[0]
-       add     %rax,$A0[1]             # a[4]*a[0]+a[3]*a[1]+t[4]
-        mov    $ai,%rax                # a[3]
-       adc     %rdx,$A0[0]
-       mov     $A0[1],($tptr,$j)       # t[4]
-
-        mov    8($aptr,$j),$ai         # a[5]
-       xor     $A1[1],$A1[1]
-       add     8($tptr,$j),$A1[0]
-       adc     \$0,$A1[1]
-       mul     $a1                     # a[4]*a[3]
-       add     %rax,$A1[0]             # a[4]*a[3]+t[5]
-        mov    $ai,%rax
-       adc     %rdx,$A1[1]
-
-       xor     $A0[1],$A0[1]
-       add     $A1[0],$A0[0]
-       lea     16($j),$j               # j++
-       adc     \$0,$A0[1]
-       mul     $a0                     # a[5]*a[2]
-       add     %rax,$A0[0]             # a[5]*a[2]+a[4]*a[3]+t[5]
-        mov    $ai,%rax
-       adc     %rdx,$A0[1]
-       mov     $A0[0],-8($tptr,$j)     # t[5], "preloaded t[1]" below
-
-       cmp     \$0,$j
-       jne     .Lsqr4x_inner
-
-       xor     $A1[0],$A1[0]
-       add     $A0[1],$A1[1]
-       adc     \$0,$A1[0]
-       mul     $a1                     # a[5]*a[3]
-       add     %rax,$A1[1]
-       adc     %rdx,$A1[0]
-
-       mov     $A1[1],($tptr)          # t[6], "preloaded t[2]" below
-       mov     $A1[0],8($tptr)         # t[7], "preloaded t[3]" below
-
-       add     \$16,$i
-       jnz     .Lsqr4x_outer
-
-                                       # comments apply to $num==4 case
-       mov     -32($aptr),$a0          # a[0]
-       lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
-       mov     -24($aptr),%rax         # a[1]
-       lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
-       mov     -16($aptr),$ai          # a[2]
-       mov     %rax,$a1
-
-       xor     $A0[1],$A0[1]
-       mul     $a0                     # a[1]*a[0]
-       add     %rax,$A0[0]             # a[1]*a[0]+t[1], preloaded t[1]
-        mov    $ai,%rax                # a[2]
-       adc     %rdx,$A0[1]
-       mov     $A0[0],-24($tptr)       # t[1]
-
-       xor     $A0[0],$A0[0]
-       add     $A1[1],$A0[1]           # a[2]*a[0]+t[2], preloaded t[2]
-       adc     \$0,$A0[0]
-       mul     $a0                     # a[2]*a[0]
-       add     %rax,$A0[1]
-        mov    $ai,%rax
-       adc     %rdx,$A0[0]
-       mov     $A0[1],-16($tptr)       # t[2]
-
-        mov    -8($aptr),$ai           # a[3]
-       mul     $a1                     # a[2]*a[1]
-       add     %rax,$A1[0]             # a[2]*a[1]+t[3], preloaded t[3]
-        mov    $ai,%rax
-       adc     \$0,%rdx
-
-       xor     $A0[1],$A0[1]
-       add     $A1[0],$A0[0]
-        mov    %rdx,$A1[1]
-       adc     \$0,$A0[1]
-       mul     $a0                     # a[3]*a[0]
-       add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
-        mov    $ai,%rax
-       adc     %rdx,$A0[1]
-       mov     $A0[0],-8($tptr)        # t[3]
-
-       xor     $A1[0],$A1[0]
-       add     $A0[1],$A1[1]
-       adc     \$0,$A1[0]
-       mul     $a1                     # a[3]*a[1]
-       add     %rax,$A1[1]
-        mov    -16($aptr),%rax         # a[2]
-       adc     %rdx,$A1[0]
-
-       mov     $A1[1],($tptr)          # t[4]
-       mov     $A1[0],8($tptr)         # t[5]
-
-       mul     $ai                     # a[2]*a[3]
+       mov     $n0,  32(%rsp)
+       mov     %rax, 40(%rsp)          # save original %rsp
+.Lsqr8x_body:
+
+       movq    $nptr, %xmm2            # save pointer to modulus
+       pxor    %xmm0,%xmm0
+       movq    $rptr,%xmm1             # save $rptr
+       movq    %r10, %xmm3             # -$num
+___
+$code.=<<___ if ($addx);
+       mov     OPENSSL_ia32cap_P+8(%rip),%eax
+       and     \$0x80100,%eax
+       cmp     \$0x80100,%eax
+       jne     .Lsqr8x_nox
+
+       call    bn_sqrx8x_internal      # see x86_64-mont5 module
+                                       # %rax  top-most carry
+                                       # %rbp  nptr
+                                       # %rcx  -8*num
+                                       # %r8   end of tp[2*num]
+       lea     (%r8,%rcx),%rbx
+       mov     %rcx,$num
+       mov     %rcx,%rdx
+       movq    %xmm1,$rptr
+       sar     \$3+2,%rcx              # %cf=0
+       jmp     .Lsqr8x_sub
+
+.align 32
+.Lsqr8x_nox:
 ___
-{
-my ($shift,$carry)=($a0,$a1);
-my @S=(@A1,$ai,$n0);
 $code.=<<___;
-        add    \$16,$i
-        xor    $shift,$shift
-        sub    $num,$i                 # $i=16-$num
-        xor    $carry,$carry
-
-       add     $A1[0],%rax             # t[5]
-       adc     \$0,%rdx
-       mov     %rax,8($tptr)           # t[5]
-       mov     %rdx,16($tptr)          # t[6]
-       mov     $carry,24($tptr)        # t[7]
-
-        mov    -16($aptr,$i),%rax      # a[0]
-       lea     64(%rsp,$num,2),$tptr
-        xor    $A0[0],$A0[0]           # t[0]
-        mov    -24($tptr,$i,2),$A0[1]  # t[1]
-
-       lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[1]            # | t[2*i]>>63
-        mov    -16($tptr,$i,2),$A0[0]  # t[2*i+2]      # prefetch
-       mov     $A0[1],$shift           # shift=t[2*i+1]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-        mov    -8($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
-       adc     %rax,$S[0]
-        mov    -8($aptr,$i),%rax       # a[i+1]        # prefetch
-       mov     $S[0],-32($tptr,$i,2)
-       adc     %rdx,$S[1]
-
-       lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
-        mov    $S[1],-24($tptr,$i,2)
-        sbb    $carry,$carry           # mov cf,$carry
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[3]            # | t[2*i]>>63
-        mov    0($tptr,$i,2),$A0[0]    # t[2*i+2]      # prefetch
-       mov     $A0[1],$shift           # shift=t[2*i+1]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-        mov    8($tptr,$i,2),$A0[1]    # t[2*i+2+1]    # prefetch
-       adc     %rax,$S[2]
-        mov    0($aptr,$i),%rax        # a[i+1]        # prefetch
-       mov     $S[2],-16($tptr,$i,2)
-       adc     %rdx,$S[3]
-       lea     16($i),$i
-       mov     $S[3],-40($tptr,$i,2)
-       sbb     $carry,$carry           # mov cf,$carry
-       jmp     .Lsqr4x_shift_n_add
+       call    bn_sqr8x_internal       # see x86_64-mont5 module
+                                       # %rax  top-most carry
+                                       # %rbp  nptr
+                                       # %r8   -8*num
+                                       # %rdi  end of tp[2*num]
+       lea     (%rdi,$num),%rbx
+       mov     $num,%rcx
+       mov     $num,%rdx
+       movq    %xmm1,$rptr
+       sar     \$3+2,%rcx              # %cf=0
+       jmp     .Lsqr8x_sub
+
+.align 32
+.Lsqr8x_sub:
+       mov     8*0(%rbx),%r12
+       mov     8*1(%rbx),%r13
+       mov     8*2(%rbx),%r14
+       mov     8*3(%rbx),%r15
+       lea     8*4(%rbx),%rbx
+       sbb     8*0(%rbp),%r12
+       sbb     8*1(%rbp),%r13
+       sbb     8*2(%rbp),%r14
+       sbb     8*3(%rbp),%r15
+       lea     8*4(%rbp),%rbp
+       mov     %r12,8*0($rptr)
+       mov     %r13,8*1($rptr)
+       mov     %r14,8*2($rptr)
+       mov     %r15,8*3($rptr)
+       lea     8*4($rptr),$rptr
+       inc     %rcx                    # preserves %cf
+       jnz     .Lsqr8x_sub
+
+       sbb     \$0,%rax                # top-most carry
+       lea     (%rbx,$num),%rbx        # rewind
+       lea     ($rptr,$num),$rptr      # rewind
+
+       movq    %rax,%xmm1
+       pxor    %xmm0,%xmm0
+       pshufd  \$0,%xmm1,%xmm1
+       mov     40(%rsp),%rsi           # restore %rsp
+       jmp     .Lsqr8x_cond_copy
+
+.align 32
+.Lsqr8x_cond_copy:
+       movdqa  16*0(%rbx),%xmm2
+       movdqa  16*1(%rbx),%xmm3
+       lea     16*2(%rbx),%rbx
+       movdqu  16*0($rptr),%xmm4
+       movdqu  16*1($rptr),%xmm5
+       lea     16*2($rptr),$rptr
+       movdqa  %xmm0,-16*2(%rbx)       # zero tp
+       movdqa  %xmm0,-16*1(%rbx)
+       movdqa  %xmm0,-16*2(%rbx,%rdx)
+       movdqa  %xmm0,-16*1(%rbx,%rdx)
+       pcmpeqd %xmm1,%xmm0
+       pand    %xmm1,%xmm2
+       pand    %xmm1,%xmm3
+       pand    %xmm0,%xmm4
+       pand    %xmm0,%xmm5
+       pxor    %xmm0,%xmm0
+       por     %xmm2,%xmm4
+       por     %xmm3,%xmm5
+       movdqu  %xmm4,-16*2($rptr)
+       movdqu  %xmm5,-16*1($rptr)
+       add     \$32,$num
+       jnz     .Lsqr8x_cond_copy
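
The .Lsqr8x_sub / .Lsqr8x_cond_copy pair above (and the analogous .Lmulx4x_sub / .Lmulx4x_cond_copy loops later) perform the final Montgomery reduction without a data-dependent branch: the modulus is subtracted unconditionally into rp, the temporary is wiped, and a mask derived from the borrow and the top-most carry selects between t[] and t[]-np[] word by word. A hedged scalar sketch of the same selection (the assembly uses SSE2 pand/pand/por for the copy; top_carry is assumed to be 0 or 1):

    #include <stdint.h>
    #include <stddef.h>

    static void mont_final_select(uint64_t *rp, uint64_t *tp, const uint64_t *np,
                                  size_t num, uint64_t top_carry)
    {
        uint64_t borrow = 0;

        for (size_t i = 0; i < num; i++) {          /* rp = tp - np, tracking borrow */
            uint64_t t = tp[i], n = np[i], d = t - n - borrow;
            borrow = (t < n) || (t == n && borrow);
            rp[i] = d;
        }
        /* keep tp when the subtraction (including the top carry) underflowed */
        uint64_t keep = 0 - (uint64_t)(top_carry < borrow);    /* all-ones or all-zeros */

        for (size_t i = 0; i < num; i++) {
            rp[i] = (tp[i] & keep) | (rp[i] & ~keep);           /* cf. pand/pand/por */
            tp[i] = 0;                                          /* zero tp, as the code does */
        }
    }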
 
-.align 16
-.Lsqr4x_shift_n_add:
-       lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[1]            # | t[2*i]>>63
-        mov    -16($tptr,$i,2),$A0[0]  # t[2*i+2]      # prefetch
-       mov     $A0[1],$shift           # shift=t[2*i+1]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-        mov    -8($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
-       adc     %rax,$S[0]
-        mov    -8($aptr,$i),%rax       # a[i+1]        # prefetch
-       mov     $S[0],-32($tptr,$i,2)
-       adc     %rdx,$S[1]
-
-       lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
-        mov    $S[1],-24($tptr,$i,2)
-        sbb    $carry,$carry           # mov cf,$carry
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[3]            # | t[2*i]>>63
-        mov    0($tptr,$i,2),$A0[0]    # t[2*i+2]      # prefetch
-       mov     $A0[1],$shift           # shift=t[2*i+1]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-        mov    8($tptr,$i,2),$A0[1]    # t[2*i+2+1]    # prefetch
-       adc     %rax,$S[2]
-        mov    0($aptr,$i),%rax        # a[i+1]        # prefetch
-       mov     $S[2],-16($tptr,$i,2)
-       adc     %rdx,$S[3]
-
-       lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
-        mov    $S[3],-8($tptr,$i,2)
-        sbb    $carry,$carry           # mov cf,$carry
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[1]            # | t[2*i]>>63
-        mov    16($tptr,$i,2),$A0[0]   # t[2*i+2]      # prefetch
-       mov     $A0[1],$shift           # shift=t[2*i+1]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-        mov    24($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
-       adc     %rax,$S[0]
-        mov    8($aptr,$i),%rax        # a[i+1]        # prefetch
-       mov     $S[0],0($tptr,$i,2)
-       adc     %rdx,$S[1]
-
-       lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
-        mov    $S[1],8($tptr,$i,2)
-        sbb    $carry,$carry           # mov cf,$carry
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[3]            # | t[2*i]>>63
-        mov    32($tptr,$i,2),$A0[0]   # t[2*i+2]      # prefetch
-       mov     $A0[1],$shift           # shift=t[2*i+1]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-        mov    40($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
-       adc     %rax,$S[2]
-        mov    16($aptr,$i),%rax       # a[i+1]        # prefetch
-       mov     $S[2],16($tptr,$i,2)
-       adc     %rdx,$S[3]
-       mov     $S[3],24($tptr,$i,2)
-       sbb     $carry,$carry           # mov cf,$carry
-       add     \$32,$i
-       jnz     .Lsqr4x_shift_n_add
-
-       lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[1]            # | t[2*i]>>63
-        mov    -16($tptr),$A0[0]       # t[2*i+2]      # prefetch
-       mov     $A0[1],$shift           # shift=t[2*i+1]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-        mov    -8($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
-       adc     %rax,$S[0]
-        mov    -8($aptr),%rax          # a[i+1]        # prefetch
-       mov     $S[0],-32($tptr)
-       adc     %rdx,$S[1]
-
-       lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
-        mov    $S[1],-24($tptr)
-        sbb    $carry,$carry           # mov cf,$carry
-       shr     \$63,$A0[0]
-       lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
-       shr     \$63,$A0[1]
-       or      $A0[0],$S[3]            # | t[2*i]>>63
-       mul     %rax                    # a[i]*a[i]
-       neg     $carry                  # mov $carry,cf
-       adc     %rax,$S[2]
-       adc     %rdx,$S[3]
-       mov     $S[2],-16($tptr)
-       mov     $S[3],-8($tptr)
+       mov     \$1,%rax
+       mov     -48(%rsi),%r15
+       mov     -40(%rsi),%r14
+       mov     -32(%rsi),%r13
+       mov     -24(%rsi),%r12
+       mov     -16(%rsi),%rbp
+       mov     -8(%rsi),%rbx
+       lea     (%rsi),%rsp
+.Lsqr8x_epilogue:
+       ret
+.size  bn_sqr8x_mont,.-bn_sqr8x_mont
 ___
-}\f
-##############################################################
-# Montgomery reduction part, "word-by-word" algorithm.
-#
-{
-my ($topbit,$nptr)=("%rbp",$aptr);
-my ($m0,$m1)=($a0,$a1);
-my @Ni=("%rbx","%r9");
-$code.=<<___;
-       mov     40(%rsp),$nptr          # restore $nptr
-       mov     48(%rsp),$n0            # restore *n0
-       xor     $j,$j
-       mov     $num,0(%rsp)            # save $num
-       sub     $num,$j                 # $j=-$num
-        mov    64(%rsp),$A0[0]         # t[0]          # modsched #
-        mov    $n0,$m0                 #               # modsched #
-       lea     64(%rsp,$num,2),%rax    # end of t[] buffer
-       lea     64(%rsp,$num),$tptr     # end of t[] window
-       mov     %rax,8(%rsp)            # save end of t[] buffer
-       lea     ($nptr,$num),$nptr      # end of n[] buffer
-       xor     $topbit,$topbit         # $topbit=0
-
-       mov     0($nptr,$j),%rax        # n[0]          # modsched #
-       mov     8($nptr,$j),$Ni[1]      # n[1]          # modsched #
-        imulq  $A0[0],$m0              # m0=t[0]*n0    # modsched #
-        mov    %rax,$Ni[0]             #               # modsched #
-       jmp     .Lsqr4x_mont_outer
+}}}
+\f
+if ($addx) {{{
+my $bp="%rdx"; # original value
 
-.align 16
-.Lsqr4x_mont_outer:
-       xor     $A0[1],$A0[1]
-       mul     $m0                     # n[0]*m0
-       add     %rax,$A0[0]             # n[0]*m0+t[0]
-        mov    $Ni[1],%rax
-       adc     %rdx,$A0[1]
-       mov     $n0,$m1
+$code.=<<___;
+.type  bn_mulx4x_mont,\@function,6
+.align 32
+bn_mulx4x_mont:
+       mov     %rsp,%rax
+.Lmulx4x_enter:
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+.Lmulx4x_prologue:
 
-       xor     $A0[0],$A0[0]
-       add     8($tptr,$j),$A0[1]
-       adc     \$0,$A0[0]
-       mul     $m0                     # n[1]*m0
-       add     %rax,$A0[1]             # n[1]*m0+t[1]
-        mov    $Ni[0],%rax
-       adc     %rdx,$A0[0]
-
-       imulq   $A0[1],$m1
-
-       mov     16($nptr,$j),$Ni[0]     # n[2]
-       xor     $A1[1],$A1[1]
-       add     $A0[1],$A1[0]
-       adc     \$0,$A1[1]
-       mul     $m1                     # n[0]*m1
-       add     %rax,$A1[0]             # n[0]*m1+"t[1]"
-        mov    $Ni[0],%rax
-       adc     %rdx,$A1[1]
-       mov     $A1[0],8($tptr,$j)      # "t[1]"
-
-       xor     $A0[1],$A0[1]
-       add     16($tptr,$j),$A0[0]
-       adc     \$0,$A0[1]
-       mul     $m0                     # n[2]*m0
-       add     %rax,$A0[0]             # n[2]*m0+t[2]
-        mov    $Ni[1],%rax
-       adc     %rdx,$A0[1]
-
-       mov     24($nptr,$j),$Ni[1]     # n[3]
-       xor     $A1[0],$A1[0]
-       add     $A0[0],$A1[1]
-       adc     \$0,$A1[0]
-       mul     $m1                     # n[1]*m1
-       add     %rax,$A1[1]             # n[1]*m1+"t[2]"
-        mov    $Ni[1],%rax
-       adc     %rdx,$A1[0]
-       mov     $A1[1],16($tptr,$j)     # "t[2]"
-
-       xor     $A0[0],$A0[0]
-       add     24($tptr,$j),$A0[1]
-       lea     32($j),$j
-       adc     \$0,$A0[0]
-       mul     $m0                     # n[3]*m0
-       add     %rax,$A0[1]             # n[3]*m0+t[3]
-        mov    $Ni[0],%rax
-       adc     %rdx,$A0[0]
-       jmp     .Lsqr4x_mont_inner
+       shl     \$3,${num}d             # convert $num to bytes
+       xor     %r10,%r10
+       sub     $num,%r10               # -$num
+       mov     ($n0),$n0               # *n0
+       lea     -72(%rsp,%r10),%rbp     # future alloca(frame+$num+8)
+       and     \$-128,%rbp
+       mov     %rsp,%r11
+       sub     %rbp,%r11
+       and     \$-4096,%r11
+       lea     (%rbp,%r11),%rsp
+       mov     (%rsp),%r10
+       cmp     %rbp,%rsp
+       ja      .Lmulx4x_page_walk
+       jmp     .Lmulx4x_page_walk_done
 
 .align 16
-.Lsqr4x_mont_inner:
-       mov     ($nptr,$j),$Ni[0]       # n[4]
-       xor     $A1[1],$A1[1]
-       add     $A0[1],$A1[0]
-       adc     \$0,$A1[1]
-       mul     $m1                     # n[2]*m1
-       add     %rax,$A1[0]             # n[2]*m1+"t[3]"
-        mov    $Ni[0],%rax
-       adc     %rdx,$A1[1]
-       mov     $A1[0],-8($tptr,$j)     # "t[3]"
-
-       xor     $A0[1],$A0[1]
-       add     ($tptr,$j),$A0[0]
-       adc     \$0,$A0[1]
-       mul     $m0                     # n[4]*m0
-       add     %rax,$A0[0]             # n[4]*m0+t[4]
-        mov    $Ni[1],%rax
-       adc     %rdx,$A0[1]
-
-       mov     8($nptr,$j),$Ni[1]      # n[5]
-       xor     $A1[0],$A1[0]
-       add     $A0[0],$A1[1]
-       adc     \$0,$A1[0]
-       mul     $m1                     # n[3]*m1
-       add     %rax,$A1[1]             # n[3]*m1+"t[4]"
-        mov    $Ni[1],%rax
-       adc     %rdx,$A1[0]
-       mov     $A1[1],($tptr,$j)       # "t[4]"
-
-       xor     $A0[0],$A0[0]
-       add     8($tptr,$j),$A0[1]
-       adc     \$0,$A0[0]
-       mul     $m0                     # n[5]*m0
-       add     %rax,$A0[1]             # n[5]*m0+t[5]
-        mov    $Ni[0],%rax
-       adc     %rdx,$A0[0]
-
-
-       mov     16($nptr,$j),$Ni[0]     # n[6]
-       xor     $A1[1],$A1[1]
-       add     $A0[1],$A1[0]
-       adc     \$0,$A1[1]
-       mul     $m1                     # n[4]*m1
-       add     %rax,$A1[0]             # n[4]*m1+"t[5]"
-        mov    $Ni[0],%rax
-       adc     %rdx,$A1[1]
-       mov     $A1[0],8($tptr,$j)      # "t[5]"
-
-       xor     $A0[1],$A0[1]
-       add     16($tptr,$j),$A0[0]
-       adc     \$0,$A0[1]
-       mul     $m0                     # n[6]*m0
-       add     %rax,$A0[0]             # n[6]*m0+t[6]
-        mov    $Ni[1],%rax
-       adc     %rdx,$A0[1]
-
-       mov     24($nptr,$j),$Ni[1]     # n[7]
-       xor     $A1[0],$A1[0]
-       add     $A0[0],$A1[1]
-       adc     \$0,$A1[0]
-       mul     $m1                     # n[5]*m1
-       add     %rax,$A1[1]             # n[5]*m1+"t[6]"
-        mov    $Ni[1],%rax
-       adc     %rdx,$A1[0]
-       mov     $A1[1],16($tptr,$j)     # "t[6]"
-
-       xor     $A0[0],$A0[0]
-       add     24($tptr,$j),$A0[1]
-       lea     32($j),$j
-       adc     \$0,$A0[0]
-       mul     $m0                     # n[7]*m0
-       add     %rax,$A0[1]             # n[7]*m0+t[7]
-        mov    $Ni[0],%rax
-       adc     %rdx,$A0[0]
-       cmp     \$0,$j
-       jne     .Lsqr4x_mont_inner
-
-        sub    0(%rsp),$j              # $j=-$num      # modsched #
-        mov    $n0,$m0                 #               # modsched #
-
-       xor     $A1[1],$A1[1]
-       add     $A0[1],$A1[0]
-       adc     \$0,$A1[1]
-       mul     $m1                     # n[6]*m1
-       add     %rax,$A1[0]             # n[6]*m1+"t[7]"
-       mov     $Ni[1],%rax
-       adc     %rdx,$A1[1]
-       mov     $A1[0],-8($tptr)        # "t[7]"
-
-       xor     $A0[1],$A0[1]
-       add     ($tptr),$A0[0]          # +t[8]
-       adc     \$0,$A0[1]
-        mov    0($nptr,$j),$Ni[0]      # n[0]          # modsched #
-       add     $topbit,$A0[0]
-       adc     \$0,$A0[1]
-
-        imulq  16($tptr,$j),$m0        # m0=t[0]*n0    # modsched #
-       xor     $A1[0],$A1[0]
-        mov    8($nptr,$j),$Ni[1]      # n[1]          # modsched #
-       add     $A0[0],$A1[1]
-        mov    16($tptr,$j),$A0[0]     # t[0]          # modsched #
-       adc     \$0,$A1[0]
-       mul     $m1                     # n[7]*m1
-       add     %rax,$A1[1]             # n[7]*m1+"t[8]"
-        mov    $Ni[0],%rax             #               # modsched #
-       adc     %rdx,$A1[0]
-       mov     $A1[1],($tptr)          # "t[8]"
-
-       xor     $topbit,$topbit
-       add     8($tptr),$A1[0]         # +t[9]
-       adc     $topbit,$topbit
-       add     $A0[1],$A1[0]
-       lea     16($tptr),$tptr         # "t[$num]>>128"
-       adc     \$0,$topbit
-       mov     $A1[0],-8($tptr)        # "t[9]"
-       cmp     8(%rsp),$tptr           # are we done?
-       jb      .Lsqr4x_mont_outer
-
-       mov     0(%rsp),$num            # restore $num
-       mov     $topbit,($tptr)         # save $topbit
+.Lmulx4x_page_walk:
+       lea     -4096(%rsp),%rsp
+       mov     (%rsp),%r10
+       cmp     %rbp,%rsp
+       ja      .Lmulx4x_page_walk
+.Lmulx4x_page_walk_done:
+
+       lea     ($bp,$num),%r10
+       ##############################################################
+       # Stack layout
+       # +0    num
+       # +8    off-loaded &b[i]
+       # +16   end of b[num]
+       # +24   saved n0
+       # +32   saved rp
+       # +40   saved %rsp
+       # +48   inner counter
+       # +56
+       # +64   tmp[num+1]
+       #
+       mov     $num,0(%rsp)            # save $num
+       shr     \$5,$num
+       mov     %r10,16(%rsp)           # end of b[num]
+       sub     \$1,$num
+       mov     $n0, 24(%rsp)           # save *n0
+       mov     $rp, 32(%rsp)           # save $rp
+       mov     %rax,40(%rsp)           # save original %rsp
+       mov     $num,48(%rsp)           # inner counter
+       jmp     .Lmulx4x_body
+
+.align 32
+.Lmulx4x_body:
 ___
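
For orientation, the stack layout listed in the comment above can be pictured as the struct below; this is an illustration only, the field names are invented here, and the tmp area is really tmp[num+1] 64-bit words starting at +64.

    #include <stdint.h>

    struct mulx4x_frame {                /* relative to %rsp after the alloca */
        uint64_t num;                    /* +0   operand length in bytes      */
        uint64_t bp_cursor;              /* +8   off-loaded &b[i]             */
        uint64_t bp_end;                 /* +16  end of b[num]                */
        uint64_t n0;                     /* +24  saved n0                     */
        uint64_t rp;                     /* +32  saved rp                     */
        uint64_t saved_rsp;              /* +40  original %rsp                */
        uint64_t inner_counter;          /* +48  inner-loop counter           */
        uint64_t unused;                 /* +56                               */
        uint64_t tmp[1];                 /* +64  tmp[num+1] in reality        */
    };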
-}\f
-##############################################################
-# Post-condition, 4x unrolled copy from bn_mul_mont
-#
-{
-my ($tptr,$nptr)=("%rbx",$aptr);
-my @ri=("%rax","%rdx","%r10","%r11");
+my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
+   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
+my $rptr=$bptr;
 $code.=<<___;
-       mov     64(%rsp,$num),@ri[0]    # tp[0]
-       lea     64(%rsp,$num),$tptr     # upper half of t[2*$num] holds result
-       mov     40(%rsp),$nptr          # restore $nptr
-       shr     \$5,$num                # num/4
-       mov     8($tptr),@ri[1]         # t[1]
-       xor     $i,$i                   # i=0 and clear CF!
-
-       mov     32(%rsp),$rptr          # restore $rptr
-       sub     0($nptr),@ri[0]
-       mov     16($tptr),@ri[2]        # t[2]
-       mov     24($tptr),@ri[3]        # t[3]
-       sbb     8($nptr),@ri[1]
-       lea     -1($num),$j             # j=num/4-1
-       jmp     .Lsqr4x_sub
-.align 16
-.Lsqr4x_sub:
-       mov     @ri[0],0($rptr,$i,8)    # rp[i]=tp[i]-np[i]
-       mov     @ri[1],8($rptr,$i,8)    # rp[i]=tp[i]-np[i]
-       sbb     16($nptr,$i,8),@ri[2]
-       mov     32($tptr,$i,8),@ri[0]   # tp[i+1]
-       mov     40($tptr,$i,8),@ri[1]
-       sbb     24($nptr,$i,8),@ri[3]
-       mov     @ri[2],16($rptr,$i,8)   # rp[i]=tp[i]-np[i]
-       mov     @ri[3],24($rptr,$i,8)   # rp[i]=tp[i]-np[i]
-       sbb     32($nptr,$i,8),@ri[0]
-       mov     48($tptr,$i,8),@ri[2]
-       mov     56($tptr,$i,8),@ri[3]
-       sbb     40($nptr,$i,8),@ri[1]
-       lea     4($i),$i                # i++
-       dec     $j                      # doesn't affect CF!
-       jnz     .Lsqr4x_sub
-
-       mov     @ri[0],0($rptr,$i,8)    # rp[i]=tp[i]-np[i]
-       mov     32($tptr,$i,8),@ri[0]   # load overflow bit
-       sbb     16($nptr,$i,8),@ri[2]
-       mov     @ri[1],8($rptr,$i,8)    # rp[i]=tp[i]-np[i]
-       sbb     24($nptr,$i,8),@ri[3]
-       mov     @ri[2],16($rptr,$i,8)   # rp[i]=tp[i]-np[i]
-
-       sbb     \$0,@ri[0]              # handle upmost overflow bit
-       mov     @ri[3],24($rptr,$i,8)   # rp[i]=tp[i]-np[i]
-       xor     $i,$i                   # i=0
-       and     @ri[0],$tptr
-       not     @ri[0]
-       mov     $rptr,$nptr
-       and     @ri[0],$nptr
-       lea     -1($num),$j
-       or      $nptr,$tptr             # tp=borrow?tp:rp
-
+       lea     8($bp),$bptr
+       mov     ($bp),%rdx              # b[0], $bp==%rdx actually
+       lea     64+32(%rsp),$tptr
+       mov     %rdx,$bi
+
+       mulx    0*8($aptr),$mi,%rax     # a[0]*b[0]
+       mulx    1*8($aptr),%r11,%r14    # a[1]*b[0]
+       add     %rax,%r11
+       mov     $bptr,8(%rsp)           # off-load &b[i]
+       mulx    2*8($aptr),%r12,%r13    # ...
+       adc     %r14,%r12
+       adc     \$0,%r13
+
+       mov     $mi,$bptr               # borrow $bptr
+       imulq   24(%rsp),$mi            # "t[0]"*n0
+       xor     $zero,$zero             # cf=0, of=0
+
+       mulx    3*8($aptr),%rax,%r14
+        mov    $mi,%rdx
+       lea     4*8($aptr),$aptr
+       adcx    %rax,%r13
+       adcx    $zero,%r14              # cf=0
+
+       mulx    0*8($nptr),%rax,%r10
+       adcx    %rax,$bptr              # discarded
+       adox    %r11,%r10
+       mulx    1*8($nptr),%rax,%r11
+       adcx    %rax,%r10
+       adox    %r12,%r11
+       .byte   0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00    # mulx  2*8($nptr),%rax,%r12
+       mov     48(%rsp),$bptr          # counter value
+       mov     %r10,-4*8($tptr)
+       adcx    %rax,%r11
+       adox    %r13,%r12
+       mulx    3*8($nptr),%rax,%r15
+        mov    $bi,%rdx
+       mov     %r11,-3*8($tptr)
+       adcx    %rax,%r12
+       adox    $zero,%r15              # of=0
+       lea     4*8($nptr),$nptr
+       mov     %r12,-2*8($tptr)
+
+       jmp     .Lmulx4x_1st
+
+.align 32
+.Lmulx4x_1st:
+       adcx    $zero,%r15              # cf=0, modulo-scheduled
+       mulx    0*8($aptr),%r10,%rax    # a[4]*b[0]
+       adcx    %r14,%r10
+       mulx    1*8($aptr),%r11,%r14    # a[5]*b[0]
+       adcx    %rax,%r11
+       mulx    2*8($aptr),%r12,%rax    # ...
+       adcx    %r14,%r12
+       mulx    3*8($aptr),%r13,%r14
+        .byte  0x67,0x67
+        mov    $mi,%rdx
+       adcx    %rax,%r13
+       adcx    $zero,%r14              # cf=0
+       lea     4*8($aptr),$aptr
+       lea     4*8($tptr),$tptr
+
+       adox    %r15,%r10
+       mulx    0*8($nptr),%rax,%r15
+       adcx    %rax,%r10
+       adox    %r15,%r11
+       mulx    1*8($nptr),%rax,%r15
+       adcx    %rax,%r11
+       adox    %r15,%r12
+       mulx    2*8($nptr),%rax,%r15
+       mov     %r10,-5*8($tptr)
+       adcx    %rax,%r12
+       mov     %r11,-4*8($tptr)
+       adox    %r15,%r13
+       mulx    3*8($nptr),%rax,%r15
+        mov    $bi,%rdx
+       mov     %r12,-3*8($tptr)
+       adcx    %rax,%r13
+       adox    $zero,%r15
+       lea     4*8($nptr),$nptr
+       mov     %r13,-2*8($tptr)
+
+       dec     $bptr                   # of=0, pass cf
+       jnz     .Lmulx4x_1st
+
+       mov     0(%rsp),$num            # load num
+       mov     8(%rsp),$bptr           # re-load &b[i]
+       adc     $zero,%r15              # modulo-scheduled
+       add     %r15,%r14
+       sbb     %r15,%r15               # top-most carry
+       mov     %r14,-1*8($tptr)
+       jmp     .Lmulx4x_outer
+
+.align 32
+.Lmulx4x_outer:
+       mov     ($bptr),%rdx            # b[i]
+       lea     8($bptr),$bptr          # b++
+       sub     $num,$aptr              # rewind $aptr
+       mov     %r15,($tptr)            # save top-most carry
+       lea     64+4*8(%rsp),$tptr
+       sub     $num,$nptr              # rewind $nptr
+
+       mulx    0*8($aptr),$mi,%r11     # a[0]*b[i]
+       xor     %ebp,%ebp               # xor   $zero,$zero     # cf=0, of=0
+       mov     %rdx,$bi
+       mulx    1*8($aptr),%r14,%r12    # a[1]*b[i]
+       adox    -4*8($tptr),$mi
+       adcx    %r14,%r11
+       mulx    2*8($aptr),%r15,%r13    # ...
+       adox    -3*8($tptr),%r11
+       adcx    %r15,%r12
+       adox    -2*8($tptr),%r12
+       adcx    $zero,%r13
+       adox    $zero,%r13
+
+       mov     $bptr,8(%rsp)           # off-load &b[i]
+       mov     $mi,%r15
+       imulq   24(%rsp),$mi            # "t[0]"*n0
+       xor     %ebp,%ebp               # xor   $zero,$zero     # cf=0, of=0
+
+       mulx    3*8($aptr),%rax,%r14
+        mov    $mi,%rdx
+       adcx    %rax,%r13
+       adox    -1*8($tptr),%r13
+       adcx    $zero,%r14
+       lea     4*8($aptr),$aptr
+       adox    $zero,%r14
+
+       mulx    0*8($nptr),%rax,%r10
+       adcx    %rax,%r15               # discarded
+       adox    %r11,%r10
+       mulx    1*8($nptr),%rax,%r11
+       adcx    %rax,%r10
+       adox    %r12,%r11
+       mulx    2*8($nptr),%rax,%r12
+       mov     %r10,-4*8($tptr)
+       adcx    %rax,%r11
+       adox    %r13,%r12
+       mulx    3*8($nptr),%rax,%r15
+        mov    $bi,%rdx
+       mov     %r11,-3*8($tptr)
+       lea     4*8($nptr),$nptr
+       adcx    %rax,%r12
+       adox    $zero,%r15              # of=0
+       mov     48(%rsp),$bptr          # counter value
+       mov     %r12,-2*8($tptr)
+
+       jmp     .Lmulx4x_inner
+
+.align 32
+.Lmulx4x_inner:
+       mulx    0*8($aptr),%r10,%rax    # a[4]*b[i]
+       adcx    $zero,%r15              # cf=0, modulo-scheduled
+       adox    %r14,%r10
+       mulx    1*8($aptr),%r11,%r14    # a[5]*b[i]
+       adcx    0*8($tptr),%r10
+       adox    %rax,%r11
+       mulx    2*8($aptr),%r12,%rax    # ...
+       adcx    1*8($tptr),%r11
+       adox    %r14,%r12
+       mulx    3*8($aptr),%r13,%r14
+        mov    $mi,%rdx
+       adcx    2*8($tptr),%r12
+       adox    %rax,%r13
+       adcx    3*8($tptr),%r13
+       adox    $zero,%r14              # of=0
+       lea     4*8($aptr),$aptr
+       lea     4*8($tptr),$tptr
+       adcx    $zero,%r14              # cf=0
+
+       adox    %r15,%r10
+       mulx    0*8($nptr),%rax,%r15
+       adcx    %rax,%r10
+       adox    %r15,%r11
+       mulx    1*8($nptr),%rax,%r15
+       adcx    %rax,%r11
+       adox    %r15,%r12
+       mulx    2*8($nptr),%rax,%r15
+       mov     %r10,-5*8($tptr)
+       adcx    %rax,%r12
+       adox    %r15,%r13
+       mulx    3*8($nptr),%rax,%r15
+        mov    $bi,%rdx
+       mov     %r11,-4*8($tptr)
+       mov     %r12,-3*8($tptr)
+       adcx    %rax,%r13
+       adox    $zero,%r15
+       lea     4*8($nptr),$nptr
+       mov     %r13,-2*8($tptr)
+
+       dec     $bptr                   # of=0, pass cf
+       jnz     .Lmulx4x_inner
+
+       mov     0(%rsp),$num            # load num
+       mov     8(%rsp),$bptr           # re-load &b[i]
+       adc     $zero,%r15              # modulo-scheduled
+       sub     0*8($tptr),$zero        # pull top-most carry
+       adc     %r15,%r14
+       sbb     %r15,%r15               # top-most carry
+       mov     %r14,-1*8($tptr)
+
+       cmp     16(%rsp),$bptr
+       jne     .Lmulx4x_outer
+
+       lea     64(%rsp),$tptr
+       sub     $num,$nptr              # rewind $nptr
+       neg     %r15
+       mov     $num,%rdx
+       shr     \$3+2,$num              # %cf=0
+       mov     32(%rsp),$rptr          # restore rp
+       jmp     .Lmulx4x_sub
+
+.align 32
+.Lmulx4x_sub:
+       mov     8*0($tptr),%r11
+       mov     8*1($tptr),%r12
+       mov     8*2($tptr),%r13
+       mov     8*3($tptr),%r14
+       lea     8*4($tptr),$tptr
+       sbb     8*0($nptr),%r11
+       sbb     8*1($nptr),%r12
+       sbb     8*2($nptr),%r13
+       sbb     8*3($nptr),%r14
+       lea     8*4($nptr),$nptr
+       mov     %r11,8*0($rptr)
+       mov     %r12,8*1($rptr)
+       mov     %r13,8*2($rptr)
+       mov     %r14,8*3($rptr)
+       lea     8*4($rptr),$rptr
+       dec     $num                    # preserves %cf
+       jnz     .Lmulx4x_sub
+
+       sbb     \$0,%r15                # top-most carry
+       lea     64(%rsp),$tptr
+       sub     %rdx,$rptr              # rewind
+
+       movq    %r15,%xmm1
        pxor    %xmm0,%xmm0
-       lea     64(%rsp,$num,8),$nptr
-       movdqu  ($tptr),%xmm1
-       lea     ($nptr,$num,8),$nptr
-       movdqa  %xmm0,64(%rsp)          # zap lower half of temporary vector
-       movdqa  %xmm0,($nptr)           # zap upper half of temporary vector
-       movdqu  %xmm1,($rptr)
-       jmp     .Lsqr4x_copy
-.align 16
-.Lsqr4x_copy:                          # copy or in-place refresh
-       movdqu  16($tptr,$i),%xmm2
-       movdqu  32($tptr,$i),%xmm1
-       movdqa  %xmm0,80(%rsp,$i)       # zap lower half of temporary vector
-       movdqa  %xmm0,96(%rsp,$i)       # zap lower half of temporary vector
-       movdqa  %xmm0,16($nptr,$i)      # zap upper half of temporary vector
-       movdqa  %xmm0,32($nptr,$i)      # zap upper half of temporary vector
-       movdqu  %xmm2,16($rptr,$i)
-       movdqu  %xmm1,32($rptr,$i)
-       lea     32($i),$i
-       dec     $j
-       jnz     .Lsqr4x_copy
+       pshufd  \$0,%xmm1,%xmm1
+       mov     40(%rsp),%rsi           # restore %rsp
+       jmp     .Lmulx4x_cond_copy
+
+.align 32
+.Lmulx4x_cond_copy:
+       movdqa  16*0($tptr),%xmm2
+       movdqa  16*1($tptr),%xmm3
+       lea     16*2($tptr),$tptr
+       movdqu  16*0($rptr),%xmm4
+       movdqu  16*1($rptr),%xmm5
+       lea     16*2($rptr),$rptr
+       movdqa  %xmm0,-16*2($tptr)      # zero tp
+       movdqa  %xmm0,-16*1($tptr)
+       pcmpeqd %xmm1,%xmm0
+       pand    %xmm1,%xmm2
+       pand    %xmm1,%xmm3
+       pand    %xmm0,%xmm4
+       pand    %xmm0,%xmm5
+       pxor    %xmm0,%xmm0
+       por     %xmm2,%xmm4
+       por     %xmm3,%xmm5
+       movdqu  %xmm4,-16*2($rptr)
+       movdqu  %xmm5,-16*1($rptr)
+       sub     \$32,%rdx
+       jnz     .Lmulx4x_cond_copy
+
+       mov     %rdx,($tptr)
 
-       movdqu  16($tptr,$i),%xmm2
-       movdqa  %xmm0,80(%rsp,$i)       # zap lower half of temporary vector
-       movdqa  %xmm0,16($nptr,$i)      # zap upper half of temporary vector
-       movdqu  %xmm2,16($rptr,$i)
-___
-}
-$code.=<<___;
-       mov     56(%rsp),%rsi           # restore %rsp
        mov     \$1,%rax
-       mov     0(%rsi),%r15
-       mov     8(%rsi),%r14
-       mov     16(%rsi),%r13
-       mov     24(%rsi),%r12
-       mov     32(%rsi),%rbp
-       mov     40(%rsi),%rbx
-       lea     48(%rsi),%rsp
-.Lsqr4x_epilogue:
+       mov     -48(%rsi),%r15
+       mov     -40(%rsi),%r14
+       mov     -32(%rsi),%r13
+       mov     -24(%rsi),%r12
+       mov     -16(%rsi),%rbp
+       mov     -8(%rsi),%rbx
+       lea     (%rsi),%rsp
+.Lmulx4x_epilogue:
        ret
-.size  bn_sqr4x_mont,.-bn_sqr4x_mont
+.size  bn_mulx4x_mont,.-bn_mulx4x_mont
 ___
 }}}
 $code.=<<___;
@@ -1545,22 +1375,8 @@ mul_handler:
 
        mov     192($context),%r10      # pull $num
        mov     8(%rax,%r10,8),%rax     # pull saved stack pointer
-       lea     48(%rax),%rax
-
-       mov     -8(%rax),%rbx
-       mov     -16(%rax),%rbp
-       mov     -24(%rax),%r12
-       mov     -32(%rax),%r13
-       mov     -40(%rax),%r14
-       mov     -48(%rax),%r15
-       mov     %rbx,144($context)      # restore context->Rbx
-       mov     %rbp,160($context)      # restore context->Rbp
-       mov     %r12,216($context)      # restore context->R12
-       mov     %r13,224($context)      # restore context->R13
-       mov     %r14,232($context)      # restore context->R14
-       mov     %r15,240($context)      # restore context->R15
 
-       jmp     .Lcommon_seh_tail
+       jmp     .Lcommon_pop_regs
 .size  mul_handler,.-mul_handler
 
 .type  sqr_handler,\@abi-omnipotent
@@ -1580,19 +1396,29 @@ sqr_handler:
        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip
 
-       lea     .Lsqr4x_body(%rip),%r10
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # end of prologue label
        cmp     %r10,%rbx               # context->Rip<prologue label
        jb      .Lcommon_seh_tail
 
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # body label
+       cmp     %r10,%rbx               # context->Rip<body label
+       jb      .Lcommon_pop_regs
+
        mov     152($context),%rax      # pull context->Rsp
 
-       lea     .Lsqr4x_epilogue(%rip),%r10
+       mov     8(%r11),%r10d           # HandlerData[2]
+       lea     (%rsi,%r10),%r10        # epilogue label
        cmp     %r10,%rbx               # context->Rip>=.Lsqr_epilogue
        jae     .Lcommon_seh_tail
 
-       mov     56(%rax),%rax           # pull saved stack pointer
-       lea     48(%rax),%rax
+       mov     40(%rax),%rax           # pull saved stack pointer
 
+.Lcommon_pop_regs:
        mov     -8(%rax),%rbx
        mov     -16(%rax),%rbp
        mov     -24(%rax),%r12
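
The reworked sqr_handler no longer hard-codes .Lsqr4x_* labels; it reads three RVAs from HandlerData (prologue, body, epilogue) and from them decides where the saved registers and the parked %rsp live. A hedged, heavily simplified sketch of that three-way decision follows; the CONTEXT/DISPATCHER_CONTEXT plumbing and the actual register restores are omitted, and the struct fields are stand-ins.

    #include <stdint.h>

    struct ctx { uint64_t rax, rsp, rip; };   /* stand-ins for CONTEXT fields */

    /* Return the "frame" address whose -8..-48 slots hold rbx/rbp/r12..r15,
     * or 0 when there is nothing to unwind at this rip. */
    static uint64_t unwind_frame(const struct ctx *c, uint64_t image_base,
                                 const uint32_t handler_data[3])
    {
        uint64_t prologue = image_base + handler_data[0];
        uint64_t body     = image_base + handler_data[1];
        uint64_t epilogue = image_base + handler_data[2];

        if (c->rip < prologue || c->rip >= epilogue)
            return 0;               /* registers not pushed yet, or already popped */
        if (c->rip < body)
            return c->rax;          /* pushes done; %rax still holds the entry %rsp */
        return *(const uint64_t *)(uintptr_t)(c->rsp + 40);
                                    /* frame built; saved %rsp parked at +40 */
    }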
@@ -1656,10 +1482,16 @@ sqr_handler:
        .rva    .LSEH_end_bn_mul4x_mont
        .rva    .LSEH_info_bn_mul4x_mont
 
-       .rva    .LSEH_begin_bn_sqr4x_mont
-       .rva    .LSEH_end_bn_sqr4x_mont
-       .rva    .LSEH_info_bn_sqr4x_mont
-
+       .rva    .LSEH_begin_bn_sqr8x_mont
+       .rva    .LSEH_end_bn_sqr8x_mont
+       .rva    .LSEH_info_bn_sqr8x_mont
+___
+$code.=<<___ if ($addx);
+       .rva    .LSEH_begin_bn_mulx4x_mont
+       .rva    .LSEH_end_bn_mulx4x_mont
+       .rva    .LSEH_info_bn_mulx4x_mont
+___
+$code.=<<___;
 .section       .xdata
 .align 8
 .LSEH_info_bn_mul_mont:
@@ -1670,9 +1502,18 @@ sqr_handler:
        .byte   9,0,0,0
        .rva    mul_handler
        .rva    .Lmul4x_body,.Lmul4x_epilogue   # HandlerData[]
-.LSEH_info_bn_sqr4x_mont:
+.LSEH_info_bn_sqr8x_mont:
+       .byte   9,0,0,0
+       .rva    sqr_handler
+       .rva    .Lsqr8x_prologue,.Lsqr8x_body,.Lsqr8x_epilogue          # HandlerData[]
+.align 8
+___
+$code.=<<___ if ($addx);
+.LSEH_info_bn_mulx4x_mont:
        .byte   9,0,0,0
        .rva    sqr_handler
+       .rva    .Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue       # HandlerData[]
+.align 8
 ___
 }