X-Git-Url: https://git.openssl.org/gitweb/?a=blobdiff_plain;f=crypto%2Fbn%2Fasm%2Frsaz-avx2.pl;h=85cd73c668bddab6f328906712a39c5755601d5e;hb=72a7a7021fa8bc82a11bc08bac1b0241a92143d0;hp=dd83ed6239707a4b466e189d1e40bae35c785b2b;hpb=f3f620e1e0fa3553116f0b3e8c8d68b070fcdb79;p=openssl.git diff --git a/crypto/bn/asm/rsaz-avx2.pl b/crypto/bn/asm/rsaz-avx2.pl index dd83ed6239..85cd73c668 100755 --- a/crypto/bn/asm/rsaz-avx2.pl +++ b/crypto/bn/asm/rsaz-avx2.pl @@ -1,68 +1,41 @@ -#!/usr/bin/env perl - -############################################################################## -# # -# Copyright (c) 2012, Intel Corporation # -# # -# All rights reserved. # -# # -# Redistribution and use in source and binary forms, with or without # -# modification, are permitted provided that the following conditions are # -# met: # -# # -# * Redistributions of source code must retain the above copyright # -# notice, this list of conditions and the following disclaimer. # -# # -# * Redistributions in binary form must reproduce the above copyright # -# notice, this list of conditions and the following disclaimer in the # -# documentation and/or other materials provided with the # -# distribution. # -# # -# * Neither the name of the Intel Corporation nor the names of its # -# contributors may be used to endorse or promote products derived from # -# this software without specific prior written permission. # -# # -# # -# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY # -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR # -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # -# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -# # -############################################################################## -# Developers and authors: # -# Shay Gueron (1, 2), and Vlad Krasnov (1) # -# (1) Intel Corporation, Israel Development Center, Haifa, Israel # -# (2) University of Haifa, Israel # -############################################################################## -# Reference: # -# [1] S. Gueron, V. Krasnov: "Software Implementation of Modular # -# Exponentiation, Using Advanced Vector Instructions Architectures", # -# F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369, # -# pp. 119?135, 2012. Springer-Verlag Berlin Heidelberg 2012 # -# [2] S. Gueron: "Efficient Software Implementations of Modular # -# Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012). # -# [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring",IEEE # -# Proceedings of 9th International Conference on Information Technology: # -# New Generations (ITNG 2012), pp.821-823 (2012) # -# [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis # -# resistant 1024-bit modular exponentiation, for optimizing RSA2048 # -# on AVX2 capable x86_64 platforms", # -# http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest# -############################################################################## +#! 
/usr/bin/env perl +# Copyright 2013-2019 The OpenSSL Project Authors. All Rights Reserved. +# Copyright (c) 2012, Intel Corporation. All Rights Reserved. +# +# Licensed under the OpenSSL license (the "License"). You may not use +# this file except in compliance with the License. You can obtain a copy +# in the file LICENSE in the source distribution or at +# https://www.openssl.org/source/license.html +# +# Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1) +# (1) Intel Corporation, Israel Development Center, Haifa, Israel +# (2) University of Haifa, Israel +# +# References: +# [1] S. Gueron, V. Krasnov: "Software Implementation of Modular +# Exponentiation, Using Advanced Vector Instructions Architectures", +# F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369, +# pp. 119?135, 2012. Springer-Verlag Berlin Heidelberg 2012 +# [2] S. Gueron: "Efficient Software Implementations of Modular +# Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012). +# [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring",IEEE +# Proceedings of 9th International Conference on Information Technology: +# New Generations (ITNG 2012), pp.821-823 (2012) +# [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis +# resistant 1024-bit modular exponentiation, for optimizing RSA2048 +# on AVX2 capable x86_64 platforms", +# http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest # # +13% improvement over original submission by # # rsa2048 sign/sec OpenSSL 1.0.1 scalar(*) this # 2.3GHz Haswell 621 765/+23% 1113/+79% +# 2.3GHz Broadwell(**) 688 1200(***)/+74% 1120/+63% # # (*) if system doesn't support AVX2, for reference purposes; +# (**) scaled to 2.3GHz to simplify comparison; +# (***) scalar AD*X code is faster than AVX2 and is preferred code +# path for Broadwell; $flavour = shift; $output = shift; @@ -93,12 +66,13 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && $addx = ($1>=11); } -if (!$avx && `$ENV{CC} -v 2>&1` =~ /LLVM ([3-9]\.[0-9]+)/) { - $avx = ($1>=3.0) + ($1>=3.1); - $addx = 0; +if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9])\.([0-9]+)/) { + my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10 + $avx = ($ver>=3.0) + ($ver>=3.01); + $addx = ($ver>=3.03); } -open OUT,"| $^X $xlate $flavour $output"; +open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; *STDOUT = *OUT; if ($avx>1) {{{ @@ -156,13 +130,21 @@ $code.=<<___; .type rsaz_1024_sqr_avx2,\@function,5 .align 64 rsaz_1024_sqr_avx2: # 702 cycles, 14% faster than rsaz_1024_mul_avx2 +.cfi_startproc lea (%rsp), %rax +.cfi_def_cfa_register %rax push %rbx +.cfi_push %rbx push %rbp +.cfi_push %rbp push %r12 +.cfi_push %r12 push %r13 +.cfi_push %r13 push %r14 +.cfi_push %r14 push %r15 +.cfi_push %r15 vzeroupper ___ $code.=<<___ if ($win64); @@ -181,6 +163,7 @@ $code.=<<___ if ($win64); ___ $code.=<<___; mov %rax,%rbp +.cfi_def_cfa_register %rbp mov %rdx, $np # reassigned argument sub \$$FrameSize, %rsp mov $np, $tmp @@ -234,7 +217,7 @@ $code.=<<___; vmovdqu 32*8-128($ap), $ACC8 lea 192(%rsp), $tp0 # 64+128=192 - vpbroadcastq .Land_mask(%rip), $AND_MASK + vmovdqu .Land_mask(%rip), $AND_MASK jmp .LOOP_GRANDE_SQR_1024 .align 32 @@ -370,7 +353,7 @@ $code.=<<___; vpaddq $TEMP1, $ACC1, $ACC1 vpmuludq 32*7-128($aap), $B2, $ACC2 vpbroadcastq 32*5-128($tpa), $B2 - vpaddq 32*11-448($tp1), $ACC2, $ACC2 + vpaddq 32*11-448($tp1), $ACC2, $ACC2 vmovdqu $ACC6, 32*6-192($tp0) vmovdqu $ACC7, 32*7-192($tp0) @@ -429,7 +412,7 @@ $code.=<<___; 
vmovdqu $ACC7, 32*16-448($tp1) lea 8($tp1), $tp1 - dec $i + dec $i jnz .LOOP_SQR_1024 ___ $ZERO = $ACC9; @@ -438,7 +421,7 @@ $TEMP2 = $B2; $TEMP3 = $Y1; $TEMP4 = $Y2; $code.=<<___; - #we need to fix indexes 32-39 to avoid overflow + # we need to fix indices 32-39 to avoid overflow vmovdqu 32*8(%rsp), $ACC8 # 32*8-192($tp0), vmovdqu 32*9(%rsp), $ACC1 # 32*9-192($tp0) vmovdqu 32*10(%rsp), $ACC2 # 32*10-192($tp0) @@ -774,7 +757,7 @@ $code.=<<___; vpblendd \$3, $TEMP4, $TEMP5, $TEMP4 vpaddq $TEMP3, $ACC7, $ACC7 vpaddq $TEMP4, $ACC8, $ACC8 - + vpsrlq \$29, $ACC4, $TEMP1 vpand $AND_MASK, $ACC4, $ACC4 vpsrlq \$29, $ACC5, $TEMP2 @@ -813,8 +796,10 @@ $code.=<<___; vzeroall mov %rbp, %rax +.cfi_def_cfa_register %rax ___ $code.=<<___ if ($win64); +.Lsqr_1024_in_tail: movaps -0xd8(%rax),%xmm6 movaps -0xc8(%rax),%xmm7 movaps -0xb8(%rax),%xmm8 @@ -828,14 +813,22 @@ $code.=<<___ if ($win64); ___ $code.=<<___; mov -48(%rax),%r15 +.cfi_restore %r15 mov -40(%rax),%r14 +.cfi_restore %r14 mov -32(%rax),%r13 +.cfi_restore %r13 mov -24(%rax),%r12 +.cfi_restore %r12 mov -16(%rax),%rbp +.cfi_restore %rbp mov -8(%rax),%rbx +.cfi_restore %rbx lea (%rax),%rsp # restore %rsp +.cfi_def_cfa_register %rsp .Lsqr_1024_epilogue: ret +.cfi_endproc .size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2 ___ } @@ -888,13 +881,21 @@ $code.=<<___; .type rsaz_1024_mul_avx2,\@function,5 .align 64 rsaz_1024_mul_avx2: +.cfi_startproc lea (%rsp), %rax +.cfi_def_cfa_register %rax push %rbx +.cfi_push %rbx push %rbp +.cfi_push %rbp push %r12 +.cfi_push %r12 push %r13 +.cfi_push %r13 push %r14 +.cfi_push %r14 push %r15 +.cfi_push %r15 ___ $code.=<<___ if ($win64); vzeroupper @@ -913,6 +914,7 @@ $code.=<<___ if ($win64); ___ $code.=<<___; mov %rax,%rbp +.cfi_def_cfa_register %rbp vzeroall mov %rdx, $bp # reassigned argument sub \$64,%rsp @@ -991,6 +993,7 @@ $code.=<<___; vmovdqu .Land_mask(%rip), $AND_MASK mov \$9, $i + vmovdqu $ACC9, 32*9-128($rp) # $ACC9 is zero after vzeroall jmp .Loop_mul_1024 .align 32 @@ -1064,10 +1067,10 @@ $code.=<<___; vpmuludq 32*6-128($np),$Yi,$TEMP1 vpaddq $TEMP1,$ACC6,$ACC6 vpmuludq 32*7-128($np),$Yi,$TEMP2 - vpblendd \$3, $ZERO, $ACC9, $ACC9 # correct $ACC3 + vpblendd \$3, $ZERO, $ACC9, $TEMP1 # correct $ACC3 vpaddq $TEMP2,$ACC7,$ACC7 vpmuludq 32*8-128($np),$Yi,$TEMP0 - vpaddq $ACC9, $ACC3, $ACC3 # correct $ACC3 + vpaddq $TEMP1, $ACC3, $ACC3 # correct $ACC3 vpaddq $TEMP0,$ACC8,$ACC8 mov %rbx, %rax @@ -1080,7 +1083,9 @@ $code.=<<___; vmovdqu -8+32*2-128($ap),$TEMP2 mov $r1, %rax + vpblendd \$0xfc, $ZERO, $ACC9, $ACC9 # correct $ACC3 imull $n0, %eax + vpaddq $ACC9,$ACC4,$ACC4 # correct $ACC3 and \$0x1fffffff, %eax imulq 16-128($ap),%rbx @@ -1316,15 +1321,12 @@ ___ # But as we underutilize resources, it's possible to correct in # each iteration with marginal performance loss. But then, as # we do it in each iteration, we can correct less digits, and -# avoid performance penalties completely. Also note that we -# correct only three digits out of four. This works because -# most significant digit is subjected to less additions. +# avoid performance penalties completely. 
$TEMP0 = $ACC9; $TEMP3 = $Bi; $TEMP4 = $Yi; $code.=<<___; - vpermq \$0, $AND_MASK, $AND_MASK vpaddq (%rsp), $TEMP1, $ACC0 vpsrlq \$29, $ACC0, $TEMP1 @@ -1438,15 +1440,17 @@ $code.=<<___; vpaddq $TEMP4, $ACC8, $ACC8 vmovdqu $ACC4, 128-128($rp) - vmovdqu $ACC5, 160-128($rp) + vmovdqu $ACC5, 160-128($rp) vmovdqu $ACC6, 192-128($rp) vmovdqu $ACC7, 224-128($rp) vmovdqu $ACC8, 256-128($rp) vzeroupper mov %rbp, %rax +.cfi_def_cfa_register %rax ___ $code.=<<___ if ($win64); +.Lmul_1024_in_tail: movaps -0xd8(%rax),%xmm6 movaps -0xc8(%rax),%xmm7 movaps -0xb8(%rax),%xmm8 @@ -1460,14 +1464,22 @@ $code.=<<___ if ($win64); ___ $code.=<<___; mov -48(%rax),%r15 +.cfi_restore %r15 mov -40(%rax),%r14 +.cfi_restore %r14 mov -32(%rax),%r13 +.cfi_restore %r13 mov -24(%rax),%r12 +.cfi_restore %r12 mov -16(%rax),%rbp +.cfi_restore %rbp mov -8(%rax),%rbx +.cfi_restore %rbx lea (%rax),%rsp # restore %rsp +.cfi_def_cfa_register %rsp .Lmul_1024_epilogue: ret +.cfi_endproc .size rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2 ___ } @@ -1480,6 +1492,7 @@ $code.=<<___; .type rsaz_1024_red2norm_avx2,\@abi-omnipotent .align 32 rsaz_1024_red2norm_avx2: +.cfi_startproc sub \$-128,$inp # size optimization xor %rax,%rax ___ @@ -1513,12 +1526,14 @@ ___ } $code.=<<___; ret +.cfi_endproc .size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2 .globl rsaz_1024_norm2red_avx2 .type rsaz_1024_norm2red_avx2,\@abi-omnipotent .align 32 rsaz_1024_norm2red_avx2: +.cfi_startproc sub \$-128,$out # size optimization mov ($inp),@T[0] mov \$0x1fffffff,%eax @@ -1550,6 +1565,7 @@ $code.=<<___; mov @T[0],`8*($j+2)-128`($out) mov @T[0],`8*($j+3)-128`($out) ret +.cfi_endproc .size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2 ___ } @@ -1561,6 +1577,7 @@ $code.=<<___; .type rsaz_1024_scatter5_avx2,\@abi-omnipotent .align 32 rsaz_1024_scatter5_avx2: +.cfi_startproc vzeroupper vmovdqu .Lscatter_permd(%rip),%ymm5 shl \$4,$power @@ -1580,74 +1597,137 @@ rsaz_1024_scatter5_avx2: vzeroupper ret +.cfi_endproc .size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2 .globl rsaz_1024_gather5_avx2 .type rsaz_1024_gather5_avx2,\@abi-omnipotent .align 32 rsaz_1024_gather5_avx2: +.cfi_startproc + vzeroupper + mov %rsp,%r11 +.cfi_def_cfa_register %r11 ___ $code.=<<___ if ($win64); lea -0x88(%rsp),%rax - vzeroupper .LSEH_begin_rsaz_1024_gather5: # I can't trust assembler to use specific encoding:-( - .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax),%rsp - .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6,-0x20(%rax) - .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7,-0x10(%rax) - .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8,0(%rax) - .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9,0x10(%rax) - .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10,0x20(%rax) - .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11,0x30(%rax) - .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12,0x40(%rax) - .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13,0x50(%rax) - .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14,0x60(%rax) - .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15,0x70(%rax) + .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax),%rsp + .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6,-0x20(%rax) + .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7,-0x10(%rax) + .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8,0(%rax) + .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9,0x10(%rax) + .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10,0x20(%rax) + .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11,0x30(%rax) + .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12,0x40(%rax) + .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13,0x50(%rax) + .byte 
0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14,0x60(%rax) + .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15,0x70(%rax) ___ $code.=<<___; - lea .Lgather_table(%rip),%r11 - mov $power,%eax - and \$3,$power - shr \$2,%eax # cache line number - shl \$4,$power # offset within cache line - - vmovdqu -32(%r11),%ymm7 # .Lgather_permd - vpbroadcastb 8(%r11,%rax), %xmm8 - vpbroadcastb 7(%r11,%rax), %xmm9 - vpbroadcastb 6(%r11,%rax), %xmm10 - vpbroadcastb 5(%r11,%rax), %xmm11 - vpbroadcastb 4(%r11,%rax), %xmm12 - vpbroadcastb 3(%r11,%rax), %xmm13 - vpbroadcastb 2(%r11,%rax), %xmm14 - vpbroadcastb 1(%r11,%rax), %xmm15 - - lea 64($inp,$power),$inp - mov \$64,%r11 # size optimization - mov \$9,%eax - jmp .Loop_gather_1024 + lea -0x100(%rsp),%rsp + and \$-32, %rsp + lea .Linc(%rip), %r10 + lea -128(%rsp),%rax # control u-op density + + vmovd $power, %xmm4 + vmovdqa (%r10),%ymm0 + vmovdqa 32(%r10),%ymm1 + vmovdqa 64(%r10),%ymm5 + vpbroadcastd %xmm4,%ymm4 + + vpaddd %ymm5, %ymm0, %ymm2 + vpcmpeqd %ymm4, %ymm0, %ymm0 + vpaddd %ymm5, %ymm1, %ymm3 + vpcmpeqd %ymm4, %ymm1, %ymm1 + vmovdqa %ymm0, 32*0+128(%rax) + vpaddd %ymm5, %ymm2, %ymm0 + vpcmpeqd %ymm4, %ymm2, %ymm2 + vmovdqa %ymm1, 32*1+128(%rax) + vpaddd %ymm5, %ymm3, %ymm1 + vpcmpeqd %ymm4, %ymm3, %ymm3 + vmovdqa %ymm2, 32*2+128(%rax) + vpaddd %ymm5, %ymm0, %ymm2 + vpcmpeqd %ymm4, %ymm0, %ymm0 + vmovdqa %ymm3, 32*3+128(%rax) + vpaddd %ymm5, %ymm1, %ymm3 + vpcmpeqd %ymm4, %ymm1, %ymm1 + vmovdqa %ymm0, 32*4+128(%rax) + vpaddd %ymm5, %ymm2, %ymm8 + vpcmpeqd %ymm4, %ymm2, %ymm2 + vmovdqa %ymm1, 32*5+128(%rax) + vpaddd %ymm5, %ymm3, %ymm9 + vpcmpeqd %ymm4, %ymm3, %ymm3 + vmovdqa %ymm2, 32*6+128(%rax) + vpaddd %ymm5, %ymm8, %ymm10 + vpcmpeqd %ymm4, %ymm8, %ymm8 + vmovdqa %ymm3, 32*7+128(%rax) + vpaddd %ymm5, %ymm9, %ymm11 + vpcmpeqd %ymm4, %ymm9, %ymm9 + vpaddd %ymm5, %ymm10, %ymm12 + vpcmpeqd %ymm4, %ymm10, %ymm10 + vpaddd %ymm5, %ymm11, %ymm13 + vpcmpeqd %ymm4, %ymm11, %ymm11 + vpaddd %ymm5, %ymm12, %ymm14 + vpcmpeqd %ymm4, %ymm12, %ymm12 + vpaddd %ymm5, %ymm13, %ymm15 + vpcmpeqd %ymm4, %ymm13, %ymm13 + vpcmpeqd %ymm4, %ymm14, %ymm14 + vpcmpeqd %ymm4, %ymm15, %ymm15 + + vmovdqa -32(%r10),%ymm7 # .Lgather_permd + lea 128($inp), $inp + mov \$9,$power -.align 32 .Loop_gather_1024: - vpand -64($inp), %xmm8,%xmm0 - vpand ($inp), %xmm9,%xmm1 - vpand 64($inp), %xmm10,%xmm2 - vpand ($inp,%r11,2), %xmm11,%xmm3 - vpor %xmm0,%xmm1,%xmm1 - vpand 64($inp,%r11,2), %xmm12,%xmm4 - vpor %xmm2,%xmm3,%xmm3 - vpand ($inp,%r11,4), %xmm13,%xmm5 - vpor %xmm1,%xmm3,%xmm3 - vpand 64($inp,%r11,4), %xmm14,%xmm6 - vpor %xmm4,%xmm5,%xmm5 - vpand -128($inp,%r11,8), %xmm15,%xmm2 - lea ($inp,%r11,8),$inp - vpor %xmm3,%xmm5,%xmm5 - vpor %xmm2,%xmm6,%xmm6 - vpor %xmm5,%xmm6,%xmm6 - vpermd %ymm6,%ymm7,%ymm6 - vmovdqu %ymm6,($out) + vmovdqa 32*0-128($inp), %ymm0 + vmovdqa 32*1-128($inp), %ymm1 + vmovdqa 32*2-128($inp), %ymm2 + vmovdqa 32*3-128($inp), %ymm3 + vpand 32*0+128(%rax), %ymm0, %ymm0 + vpand 32*1+128(%rax), %ymm1, %ymm1 + vpand 32*2+128(%rax), %ymm2, %ymm2 + vpor %ymm0, %ymm1, %ymm4 + vpand 32*3+128(%rax), %ymm3, %ymm3 + vmovdqa 32*4-128($inp), %ymm0 + vmovdqa 32*5-128($inp), %ymm1 + vpor %ymm2, %ymm3, %ymm5 + vmovdqa 32*6-128($inp), %ymm2 + vmovdqa 32*7-128($inp), %ymm3 + vpand 32*4+128(%rax), %ymm0, %ymm0 + vpand 32*5+128(%rax), %ymm1, %ymm1 + vpand 32*6+128(%rax), %ymm2, %ymm2 + vpor %ymm0, %ymm4, %ymm4 + vpand 32*7+128(%rax), %ymm3, %ymm3 + vpand 32*8-128($inp), %ymm8, %ymm0 + vpor %ymm1, %ymm5, %ymm5 + vpand 32*9-128($inp), %ymm9, %ymm1 + vpor %ymm2, %ymm4, %ymm4 + vpand 
32*10-128($inp),%ymm10, %ymm2 + vpor %ymm3, %ymm5, %ymm5 + vpand 32*11-128($inp),%ymm11, %ymm3 + vpor %ymm0, %ymm4, %ymm4 + vpand 32*12-128($inp),%ymm12, %ymm0 + vpor %ymm1, %ymm5, %ymm5 + vpand 32*13-128($inp),%ymm13, %ymm1 + vpor %ymm2, %ymm4, %ymm4 + vpand 32*14-128($inp),%ymm14, %ymm2 + vpor %ymm3, %ymm5, %ymm5 + vpand 32*15-128($inp),%ymm15, %ymm3 + lea 32*16($inp), $inp + vpor %ymm0, %ymm4, %ymm4 + vpor %ymm1, %ymm5, %ymm5 + vpor %ymm2, %ymm4, %ymm4 + vpor %ymm3, %ymm5, %ymm5 + + vpor %ymm5, %ymm4, %ymm4 + vextracti128 \$1, %ymm4, %xmm5 # upper half is cleared + vpor %xmm4, %xmm5, %xmm5 + vpermd %ymm5,%ymm7,%ymm5 + vmovdqu %ymm5,($out) lea 32($out),$out - dec %eax + dec $power jnz .Loop_gather_1024 vpxor %ymm0,%ymm0,%ymm0 @@ -1655,21 +1735,23 @@ $code.=<<___; vzeroupper ___ $code.=<<___ if ($win64); - movaps (%rsp),%xmm6 - movaps 0x10(%rsp),%xmm7 - movaps 0x20(%rsp),%xmm8 - movaps 0x30(%rsp),%xmm9 - movaps 0x40(%rsp),%xmm10 - movaps 0x50(%rsp),%xmm11 - movaps 0x60(%rsp),%xmm12 - movaps 0x70(%rsp),%xmm13 - movaps 0x80(%rsp),%xmm14 - movaps 0x90(%rsp),%xmm15 - lea 0xa8(%rsp),%rsp -.LSEH_end_rsaz_1024_gather5: + movaps -0xa8(%r11),%xmm6 + movaps -0x98(%r11),%xmm7 + movaps -0x88(%r11),%xmm8 + movaps -0x78(%r11),%xmm9 + movaps -0x68(%r11),%xmm10 + movaps -0x58(%r11),%xmm11 + movaps -0x48(%r11),%xmm12 + movaps -0x38(%r11),%xmm13 + movaps -0x28(%r11),%xmm14 + movaps -0x18(%r11),%xmm15 ___ $code.=<<___; + lea (%r11),%rsp +.cfi_def_cfa_register %rsp ret +.cfi_endproc +.LSEH_end_rsaz_1024_gather5: .size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2 ___ } @@ -1697,13 +1779,15 @@ $code.=<<___; .align 64 .Land_mask: - .quad 0x1fffffff,0x1fffffff,0x1fffffff,-1 + .quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff .Lscatter_permd: .long 0,2,4,6,7,7,7,7 .Lgather_permd: .long 0,7,1,7,2,7,3,7 -.Lgather_table: - .byte 0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0 +.Linc: + .long 0,0,0,0, 1,1,1,1 + .long 2,2,2,2, 3,3,3,3 + .long 4,4,4,4, 4,4,4,4 .align 64 ___ @@ -1740,14 +1824,17 @@ rsaz_se_handler: cmp %r10,%rbx # context->RipRsp - mov 4(%r11),%r10d # HandlerData[1] lea (%rsi,%r10),%r10 # epilogue label cmp %r10,%rbx # context->Rip>=epilogue label jae .Lcommon_seh_tail - mov 160($context),%rax # pull context->Rbp + mov 160($context),%rbp # pull context->Rbp + + mov 8(%r11),%r10d # HandlerData[2] + lea (%rsi,%r10),%r10 # "in tail" label + cmp %r10,%rbx # context->Rip>="in tail" label + cmovc %rbp,%rax mov -48(%rax),%r15 mov -40(%rax),%r14 @@ -1825,24 +1912,27 @@ rsaz_se_handler: .LSEH_info_rsaz_1024_sqr_avx2: .byte 9,0,0,0 .rva rsaz_se_handler - .rva .Lsqr_1024_body,.Lsqr_1024_epilogue + .rva .Lsqr_1024_body,.Lsqr_1024_epilogue,.Lsqr_1024_in_tail + .long 0 .LSEH_info_rsaz_1024_mul_avx2: .byte 9,0,0,0 .rva rsaz_se_handler - .rva .Lmul_1024_body,.Lmul_1024_epilogue + .rva .Lmul_1024_body,.Lmul_1024_epilogue,.Lmul_1024_in_tail + .long 0 .LSEH_info_rsaz_1024_gather5: - .byte 0x01,0x33,0x16,0x00 - .byte 0x36,0xf8,0x09,0x00 #vmovaps 0x90(rsp),xmm15 - .byte 0x31,0xe8,0x08,0x00 #vmovaps 0x80(rsp),xmm14 - .byte 0x2c,0xd8,0x07,0x00 #vmovaps 0x70(rsp),xmm13 - .byte 0x27,0xc8,0x06,0x00 #vmovaps 0x60(rsp),xmm12 - .byte 0x22,0xb8,0x05,0x00 #vmovaps 0x50(rsp),xmm11 - .byte 0x1d,0xa8,0x04,0x00 #vmovaps 0x40(rsp),xmm10 - .byte 0x18,0x98,0x03,0x00 #vmovaps 0x30(rsp),xmm9 - .byte 0x13,0x88,0x02,0x00 #vmovaps 0x20(rsp),xmm8 - .byte 0x0e,0x78,0x01,0x00 #vmovaps 0x10(rsp),xmm7 - .byte 0x09,0x68,0x00,0x00 #vmovaps 0x00(rsp),xmm6 - .byte 0x04,0x01,0x15,0x00 #sub rsp,0xa8 + .byte 0x01,0x36,0x17,0x0b + .byte 0x36,0xf8,0x09,0x00 # vmovaps 
0x90(rsp),xmm15 + .byte 0x31,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14 + .byte 0x2c,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13 + .byte 0x27,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12 + .byte 0x22,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11 + .byte 0x1d,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10 + .byte 0x18,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9 + .byte 0x13,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8 + .byte 0x0e,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7 + .byte 0x09,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6 + .byte 0x04,0x01,0x15,0x00 # sub rsp,0xa8 + .byte 0x00,0xb3,0x00,0x00 # set_frame r11 ___ }
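
The carry-correction comments in rsaz_1024_sqr_avx2/rsaz_1024_mul_avx2, the 0x1fffffff .Land_mask and the recurring "vpsrlq \$29 / and \$0x1fffffff" sequences all describe the same redundant representation: the 1024-bit operands are held as 29-bit digits, one per 64-bit lane, so many 58-bit partial products can be accumulated before carries have to be folded back in (rsaz_1024_norm2red_avx2 and rsaz_1024_red2norm_avx2 convert to and from this form). The C sketch below only illustrates that idea; the digit count, padding and helper names (norm_to_red, propagate_carries) are illustrative assumptions, not the exact layout produced by the assembly.

    #include <stdint.h>

    #define DIGITS      36                          /* 36*29 = 1044 >= 1024 bits  */
    #define DIGIT_BITS  29
    #define DIGIT_MASK  ((1ULL << DIGIT_BITS) - 1)  /* 0x1fffffff, cf. .Land_mask */

    /* Split a 1024-bit number (sixteen 64-bit words, little-endian) into
     * 29-bit digits -- the direction rsaz_1024_norm2red_avx2 converts in. */
    static void norm_to_red(uint64_t red[DIGITS], const uint64_t norm[16])
    {
        for (int j = 0; j < DIGITS; j++) {
            int bit = j * DIGIT_BITS;               /* first bit of digit j */
            int word = bit / 64, off = bit % 64;
            uint64_t d = norm[word] >> off;

            if (off > 64 - DIGIT_BITS && word + 1 < 16)
                d |= norm[word + 1] << (64 - off);  /* digit straddles two words */
            red[j] = d & DIGIT_MASK;
        }
    }

    /* After a batch of multiply-accumulate steps each 64-bit lane holds more
     * than 29 bits; fold the excess of every digit into the next one.  This
     * is the "correction" the comments above describe deferring/batching. */
    static void propagate_carries(uint64_t a[DIGITS])
    {
        uint64_t carry = 0;

        for (int i = 0; i < DIGITS; i++) {
            uint64_t t = a[i] + carry;
            a[i]  = t & DIGIT_MASK;                 /* keep the low 29 bits */
            carry = t >> DIGIT_BITS;                /* push the rest upward */
        }
        /* a final carry out of the top digit is handled separately */
    }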
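The rewritten rsaz_1024_gather5_avx2 no longer selects bytes with vpbroadcastb at a "cache line number" / "offset within cache line" derived from $power; it builds all-ones/all-zeros dword masks with vpcmpeqd against the .Linc constants, then reads every table entry and vpand/vpor-accumulates it, so the sequence of memory locations touched no longer depends on the secret window index. A minimal C sketch of that mask-and-accumulate idea follows, under stated simplifications: the flat table[ENTRIES][LIMBS] layout and the name const_time_gather are assumptions (the real table is interleaved by rsaz_1024_scatter5_avx2), and LIMBS is an arbitrary illustrative size.

    #include <stdint.h>
    #include <stddef.h>

    #define ENTRIES 32          /* 2^5 values of the 5-bit window ("gather5") */
    #define LIMBS   40          /* size of one table entry; illustrative only */

    /* Read all ENTRIES rows, keep only the one whose index equals the secret
     * "power", using a branch-free mask -- so the access pattern is fixed.  */
    static void const_time_gather(uint64_t out[LIMBS],
                                  const uint64_t table[ENTRIES][LIMBS],
                                  uint32_t power)          /* secret, 0..31 */
    {
        for (size_t j = 0; j < LIMBS; j++)
            out[j] = 0;

        for (uint32_t i = 0; i < ENTRIES; i++) {
            /* mask = all-ones iff i == power (constant-time is-zero trick),
             * the scalar analogue of the vpcmpeqd masks built from .Linc */
            uint64_t diff = (uint64_t)(i ^ power);
            uint64_t mask = ((diff | (0 - diff)) >> 63) - 1;

            for (size_t j = 0; j < LIMBS; j++)
                out[j] |= table[i][j] & mask;               /* vpand + vpor */
        }
    }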