1 #! /usr/bin/env perl
2 # Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the Apache License 2.0 (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16
17 # The multi-buffer SHA1 procedure processes n buffers in parallel by
18 # placing buffer data into designated lanes of SIMD registers. n is
19 # naturally limited to 4 on pre-AVX2 processors and to 8 on
20 # AVX2-capable processors such as Haswell.
21 #
22 #               this    +aesni(i)       sha1    aesni-sha1      gain(iv)
23 # -------------------------------------------------------------------
24 # Westmere(ii)  10.7/n  +1.28=3.96(n=4) 5.30    6.66            +68%
25 # Atom(ii)      18.1/n  +3.93=8.46(n=4) 9.37    12.8            +51%
26 # Sandy Bridge  (8.16   +5.15=13.3)/n   4.99    5.98            +80%
27 # Ivy Bridge    (8.08   +5.14=13.2)/n   4.60    5.54            +68%
28 # Haswell(iii)  (8.96   +5.00=14.0)/n   3.57    4.55            +160%
29 # Skylake       (8.70   +5.00=13.7)/n   3.64    4.20            +145%
30 # Bulldozer     (9.76   +5.76=15.5)/n   5.95    6.37            +64%
31 #
32 # (i)   multi-block CBC encrypt with 128-bit key;
33 # (ii)  (HASH+AES)/n does not apply to Westmere for n>3, nor to Atom,
34 #       because of their lower AES-NI instruction throughput;
35 # (iii) "this" is for n=8, when we gather twice as much data, result
36 #       for n=4 is 8.00+4.44=12.4;
37 # (iv)  the improvement coefficients presented are asymptotic limits; in
38 #       real-life applications they are somewhat lower, e.g. for 2KB
39 #       fragments they range from 30% to 100% (on Haswell);
40
41 # $output is the last argument if it looks like a file (it has an extension)
42 # $flavour is the first argument if it doesn't look like a file
43 $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
44 $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
45
46 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
47
48 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
49 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
50 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
51 die "can't locate x86_64-xlate.pl";
52
53 $avx=0;
54
55 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
56                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
57         $avx = ($1>=2.19) + ($1>=2.22);
58 }
59
60 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
61            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
62         $avx = ($1>=2.09) + ($1>=2.10);
63 }
64
65 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
66            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
67         $avx = ($1>=10) + ($1>=11);
68 }
69
70 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
71         $avx = ($2>=3.0) + ($2>3.0);
72 }
73
74 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
75     or die "can't call $xlate: $!";
76 *STDOUT=*OUT;
77
78 # void sha1_multi_block (
79 #     struct {  unsigned int A[8];
80 #               unsigned int B[8];
81 #               unsigned int C[8];
82 #               unsigned int D[8];
83 #               unsigned int E[8];      } *ctx,
84 #     struct {  void *ptr; int blocks;  } inp[8],
85 #     int num);         /* 1 or 2 */
86 #
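# A minimal, purely illustrative caller sketch (not part of OpenSSL's public
# API; the buffer/length names are hypothetical), assuming four inputs made
# of complete 64-byte blocks -- padding and finalization are the caller's job:
#
#     struct { unsigned int A[8],B[8],C[8],D[8],E[8]; } ctx;
#     struct { void *ptr; int blocks; } inp[8];
#     for (int i = 0; i < 4; i++) {
#         ctx.A[i] = 0x67452301; ctx.B[i] = 0xefcdab89;   /* SHA-1 IV,  */
#         ctx.C[i] = 0x98badcfe; ctx.D[i] = 0x10325476;   /* one lane   */
#         ctx.E[i] = 0xc3d2e1f0;                          /* per buffer */
#         inp[i].ptr = buf[i]; inp[i].blocks = len[i]/64;
#     }
#     sha1_multi_block(&ctx, inp, 1);   /* num=1 -> 4 lanes, num=2 -> 8 */
#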
87 $ctx="%rdi";    # 1st arg
88 $inp="%rsi";    # 2nd arg
89 $num="%edx";
90 @ptr=map("%r$_",(8..11));
91 $Tbl="%rbp";
92
93 @V=($A,$B,$C,$D,$E)=map("%xmm$_",(0..4));
94 ($t0,$t1,$t2,$t3,$tx)=map("%xmm$_",(5..9));
95 @Xi=map("%xmm$_",(10..14));
96 $K="%xmm15";
97
98 if (1) {
99     # Atom-specific optimization aiming to eliminate pshufb with high
100     # registers [and thus avoid an accumulated penalty of 48 cycles]
101     @Xi=map("%xmm$_",(0..4));
102     ($tx,$t0,$t1,$t2,$t3)=map("%xmm$_",(5..9));
103     @V=($A,$B,$C,$D,$E)=map("%xmm$_",(10..14));
104 }
105
106 $REG_SZ=16;
107
108 sub Xi_off {
109 my $off = shift;
110
111     $off %= 16; $off *= $REG_SZ;
112     $off<256 ? "$off-128(%rax)" : "$off-256-128(%rbx)";
113 }
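# The message schedule X[0..15] for all lanes lives in a 16-slot ring buffer
# on the stack, addressed relative to %rax (= %rsp+128) and, once the ring
# grows past 256 bytes in the AVX2 case, relative to %rbx (= %rsp+256+128);
# the -128 bias keeps every displacement within signed-byte range. For
# example, with $REG_SZ=16 Xi_off(17) yields "16-128(%rax)" (slot 1), while
# with $REG_SZ=32 Xi_off(9) yields "288-256-128(%rbx)" (slot 9 of the wider
# ring).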
114
115 sub BODY_00_19 {
116 my ($i,$a,$b,$c,$d,$e)=@_;
117 my $j=$i+1;
118 my $k=$i+2;
119
120 # Loads are performed 2+3/4 iterations in advance. The 3/4 means that,
121 # out of the 4 words you would expect to be loaded in a given iteration,
122 # one is spilled into the next iteration. In other words, indices in the
123 # four input streams are distributed as follows:
124 #
125 # $i==0:        0,0,0,0,1,1,1,1,2,2,2,
126 # $i==1:        2,3,3,3,
127 # $i==2:        3,4,4,4,
128 # ...
129 # $i==13:       14,15,15,15,
130 # $i==14:       15
131 #
132 # Then at $i==15 Xupdate is applied one iteration in advance...
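#
# Each invocation emits one SHA-1 round, computed lane-wise across the four
# input streams:
#
#	e += rol(a,5) + Ch(b,c,d) + K_00_19 + X[i];	b = rol(b,30);
#
# where Ch(b,c,d) = (b & c) ^ (~b & d). SSE has no rotate instruction, so
# each rotation is synthesized from a pslld/psrld/por triplet.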
133 $code.=<<___ if ($i==0);
134         movd            (@ptr[0]),@Xi[0]
135          lea            `16*4`(@ptr[0]),@ptr[0]
136         movd            (@ptr[1]),@Xi[2]        # borrow @Xi[2]
137          lea            `16*4`(@ptr[1]),@ptr[1]
138         movd            (@ptr[2]),@Xi[3]        # borrow @Xi[3]
139          lea            `16*4`(@ptr[2]),@ptr[2]
140         movd            (@ptr[3]),@Xi[4]        # borrow @Xi[4]
141          lea            `16*4`(@ptr[3]),@ptr[3]
142         punpckldq       @Xi[3],@Xi[0]
143          movd           `4*$j-16*4`(@ptr[0]),@Xi[1]
144         punpckldq       @Xi[4],@Xi[2]
145          movd           `4*$j-16*4`(@ptr[1]),$t3
146         punpckldq       @Xi[2],@Xi[0]
147          movd           `4*$j-16*4`(@ptr[2]),$t2
148         pshufb          $tx,@Xi[0]
149 ___
150 $code.=<<___ if ($i<14);                        # just load input
151          movd           `4*$j-16*4`(@ptr[3]),$t1
152          punpckldq      $t2,@Xi[1]
153         movdqa  $a,$t2
154         paddd   $K,$e                           # e+=K_00_19
155          punpckldq      $t1,$t3
156         movdqa  $b,$t1
157         movdqa  $b,$t0
158         pslld   \$5,$t2
159         pandn   $d,$t1
160         pand    $c,$t0
161          punpckldq      $t3,@Xi[1]
162         movdqa  $a,$t3
163
164         movdqa  @Xi[0],`&Xi_off($i)`
165         paddd   @Xi[0],$e                       # e+=X[i]
166          movd           `4*$k-16*4`(@ptr[0]),@Xi[2]
167         psrld   \$27,$t3
168         pxor    $t1,$t0                         # Ch(b,c,d)
169         movdqa  $b,$t1
170
171         por     $t3,$t2                         # rol(a,5)
172          movd           `4*$k-16*4`(@ptr[1]),$t3
173         pslld   \$30,$t1
174         paddd   $t0,$e                          # e+=Ch(b,c,d)
175
176         psrld   \$2,$b
177         paddd   $t2,$e                          # e+=rol(a,5)
178          pshufb $tx,@Xi[1]
179          movd           `4*$k-16*4`(@ptr[2]),$t2
180         por     $t1,$b                          # b=rol(b,30)
181 ___
182 $code.=<<___ if ($i==14);                       # just load input
183          movd           `4*$j-16*4`(@ptr[3]),$t1
184          punpckldq      $t2,@Xi[1]
185         movdqa  $a,$t2
186         paddd   $K,$e                           # e+=K_00_19
187          punpckldq      $t1,$t3
188         movdqa  $b,$t1
189         movdqa  $b,$t0
190         pslld   \$5,$t2
191          prefetcht0     63(@ptr[0])
192         pandn   $d,$t1
193         pand    $c,$t0
194          punpckldq      $t3,@Xi[1]
195         movdqa  $a,$t3
196
197         movdqa  @Xi[0],`&Xi_off($i)`
198         paddd   @Xi[0],$e                       # e+=X[i]
199         psrld   \$27,$t3
200         pxor    $t1,$t0                         # Ch(b,c,d)
201         movdqa  $b,$t1
202          prefetcht0     63(@ptr[1])
203
204         por     $t3,$t2                         # rol(a,5)
205         pslld   \$30,$t1
206         paddd   $t0,$e                          # e+=Ch(b,c,d)
207          prefetcht0     63(@ptr[2])
208
209         psrld   \$2,$b
210         paddd   $t2,$e                          # e+=rol(a,5)
211          pshufb $tx,@Xi[1]
212          prefetcht0     63(@ptr[3])
213         por     $t1,$b                          # b=rol(b,30)
214 ___
215 $code.=<<___ if ($i>=13 && $i<15);
216         movdqa  `&Xi_off($j+2)`,@Xi[3]          # preload "X[2]"
217 ___
218 $code.=<<___ if ($i>=15);                       # apply Xupdate
219         pxor    @Xi[-2],@Xi[1]                  # "X[13]"
220         movdqa  `&Xi_off($j+2)`,@Xi[3]          # "X[2]"
221
222         movdqa  $a,$t2
223          pxor   `&Xi_off($j+8)`,@Xi[1]
224         paddd   $K,$e                           # e+=K_00_19
225         movdqa  $b,$t1
226         pslld   \$5,$t2
227          pxor   @Xi[3],@Xi[1]
228         movdqa  $b,$t0
229         pandn   $d,$t1
230          movdqa @Xi[1],$tx
231         pand    $c,$t0
232         movdqa  $a,$t3
233          psrld  \$31,$tx
234          paddd  @Xi[1],@Xi[1]
235
236         movdqa  @Xi[0],`&Xi_off($i)`
237         paddd   @Xi[0],$e                       # e+=X[i]
238         psrld   \$27,$t3
239         pxor    $t1,$t0                         # Ch(b,c,d)
240
241         movdqa  $b,$t1
242         por     $t3,$t2                         # rol(a,5)
243         pslld   \$30,$t1
244         paddd   $t0,$e                          # e+=Ch(b,c,d)
245
246         psrld   \$2,$b
247         paddd   $t2,$e                          # e+=rol(a,5)
248          por    $tx,@Xi[1]                      # rol   \$1,@Xi[1]
249         por     $t1,$b                          # b=rol(b,30)
250 ___
251 push(@Xi,shift(@Xi));
252 }
253
254 sub BODY_20_39 {
255 my ($i,$a,$b,$c,$d,$e)=@_;
256 my $j=$i+1;
257
258 $code.=<<___ if ($i<79);
259         pxor    @Xi[-2],@Xi[1]                  # "X[13]"
260         movdqa  `&Xi_off($j+2)`,@Xi[3]          # "X[2]"
261
262         movdqa  $a,$t2
263         movdqa  $d,$t0
264          pxor   `&Xi_off($j+8)`,@Xi[1]
265         paddd   $K,$e                           # e+=K_20_39
266         pslld   \$5,$t2
267         pxor    $b,$t0
268
269         movdqa  $a,$t3
270 ___
271 $code.=<<___ if ($i<72);
272         movdqa  @Xi[0],`&Xi_off($i)`
273 ___
274 $code.=<<___ if ($i<79);
275         paddd   @Xi[0],$e                       # e+=X[i]
276          pxor   @Xi[3],@Xi[1]
277         psrld   \$27,$t3
278         pxor    $c,$t0                          # Parity(b,c,d)
279         movdqa  $b,$t1
280
281         pslld   \$30,$t1
282          movdqa @Xi[1],$tx
283         por     $t3,$t2                         # rol(a,5)
284          psrld  \$31,$tx
285         paddd   $t0,$e                          # e+=Parity(b,c,d)
286          paddd  @Xi[1],@Xi[1]
287
288         psrld   \$2,$b
289         paddd   $t2,$e                          # e+=rol(a,5)
290          por    $tx,@Xi[1]                      # rol(@Xi[1],1)
291         por     $t1,$b                          # b=rol(b,30)
292 ___
293 $code.=<<___ if ($i==79);
294         movdqa  $a,$t2
295         paddd   $K,$e                           # e+=K_20_39
296         movdqa  $d,$t0
297         pslld   \$5,$t2
298         pxor    $b,$t0
299
300         movdqa  $a,$t3
301         paddd   @Xi[0],$e                       # e+=X[i]
302         psrld   \$27,$t3
303         movdqa  $b,$t1
304         pxor    $c,$t0                          # Parity(b,c,d)
305
306         pslld   \$30,$t1
307         por     $t3,$t2                         # rol(a,5)
308         paddd   $t0,$e                          # e+=Parity(b,c,d)
309
310         psrld   \$2,$b
311         paddd   $t2,$e                          # e+=rol(a,5)
312         por     $t1,$b                          # b=rol(b,30)
313 ___
314 push(@Xi,shift(@Xi));
315 }
316
317 sub BODY_40_59 {
318 my ($i,$a,$b,$c,$d,$e)=@_;
319 my $j=$i+1;
320
321 $code.=<<___;
322         pxor    @Xi[-2],@Xi[1]                  # "X[13]"
323         movdqa  `&Xi_off($j+2)`,@Xi[3]          # "X[2]"
324
325         movdqa  $a,$t2
326         movdqa  $d,$t1
327          pxor   `&Xi_off($j+8)`,@Xi[1]
328         pxor    @Xi[3],@Xi[1]
329         paddd   $K,$e                           # e+=K_40_59
330         pslld   \$5,$t2
331         movdqa  $a,$t3
332         pand    $c,$t1
333
334         movdqa  $d,$t0
335          movdqa @Xi[1],$tx
336         psrld   \$27,$t3
337         paddd   $t1,$e
338         pxor    $c,$t0
339
340         movdqa  @Xi[0],`&Xi_off($i)`
341         paddd   @Xi[0],$e                       # e+=X[i]
342         por     $t3,$t2                         # rol(a,5)
343          psrld  \$31,$tx
344         pand    $b,$t0
345         movdqa  $b,$t1
346
347         pslld   \$30,$t1
348          paddd  @Xi[1],@Xi[1]
349         paddd   $t0,$e                          # e+=Maj(b,d,c)
350
351         psrld   \$2,$b
352         paddd   $t2,$e                          # e+=rol(a,5)
353          por    $tx,@Xi[1]                      # rol(@X[1],1)
354         por     $t1,$b                          # b=rol(b,30)
355 ___
356 push(@Xi,shift(@Xi));
357 }
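# BODY_40_59 above relies on the identity
#
#	Maj(b,c,d) = (c & d) + (b & (c ^ d))
#
# (the two terms are bitwise disjoint, so '+', '|' and '^' coincide), which
# lets the majority function be accumulated into e with two paddd
# instructions.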
358
359 $code.=<<___;
360 .text
361
362 .extern OPENSSL_ia32cap_P
363
364 .globl  sha1_multi_block
365 .type   sha1_multi_block,\@function,3
366 .align  32
367 sha1_multi_block:
368 .cfi_startproc
369         mov     OPENSSL_ia32cap_P+4(%rip),%rcx
370         bt      \$61,%rcx                       # check SHA bit
371         jc      _shaext_shortcut
372 ___
373 $code.=<<___ if ($avx);
374         test    \$`1<<28`,%ecx
375         jnz     _avx_shortcut
376 ___
377 $code.=<<___;
378         mov     %rsp,%rax
379 .cfi_def_cfa_register   %rax
380         push    %rbx
381 .cfi_push       %rbx
382         push    %rbp
383 .cfi_push       %rbp
384 ___
385 $code.=<<___ if ($win64);
386         lea     -0xa8(%rsp),%rsp
387         movaps  %xmm6,(%rsp)
388         movaps  %xmm7,0x10(%rsp)
389         movaps  %xmm8,0x20(%rsp)
390         movaps  %xmm9,0x30(%rsp)
391         movaps  %xmm10,-0x78(%rax)
392         movaps  %xmm11,-0x68(%rax)
393         movaps  %xmm12,-0x58(%rax)
394         movaps  %xmm13,-0x48(%rax)
395         movaps  %xmm14,-0x38(%rax)
396         movaps  %xmm15,-0x28(%rax)
397 ___
398 $code.=<<___;
399         sub     \$`$REG_SZ*18`,%rsp
400         and     \$-256,%rsp
401         mov     %rax,`$REG_SZ*17`(%rsp)         # original %rsp
402 .cfi_cfa_expression     %rsp+`$REG_SZ*17`,deref,+8
403 .Lbody:
404         lea     K_XX_XX(%rip),$Tbl
405         lea     `$REG_SZ*16`(%rsp),%rbx
406
407 .Loop_grande:
408         mov     $num,`$REG_SZ*17+8`(%rsp)       # original $num
409         xor     $num,$num
410 ___
411 for($i=0;$i<4;$i++) {
412     $code.=<<___;
413         mov     `16*$i+0`($inp),@ptr[$i]        # input pointer
414         mov     `16*$i+8`($inp),%ecx            # number of blocks
415         cmp     $num,%ecx
416         cmovg   %ecx,$num                       # find maximum
417         test    %ecx,%ecx
418         mov     %ecx,`4*$i`(%rbx)               # initialize counters
419         cmovle  $Tbl,@ptr[$i]                   # cancel input
420 ___
421 }
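# Lanes whose descriptor reports no blocks are "cancelled": their input
# pointer is redirected to the constant table so the gathering loads stay
# harmless, and the per-lane counter masks computed at the bottom of .Loop
# keep such lanes from disturbing the saved context.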
422 $code.=<<___;
423         test    $num,$num
424         jz      .Ldone
425
426         movdqu  0x00($ctx),$A                   # load context
427          lea    128(%rsp),%rax
428         movdqu  0x20($ctx),$B
429         movdqu  0x40($ctx),$C
430         movdqu  0x60($ctx),$D
431         movdqu  0x80($ctx),$E
432         movdqa  0x60($Tbl),$tx                  # pbswap_mask
433         movdqa  -0x20($Tbl),$K                  # K_00_19
434         jmp     .Loop
435
436 .align  32
437 .Loop:
438 ___
439 for($i=0;$i<20;$i++)    { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
440 $code.="        movdqa  0x00($Tbl),$K\n";       # K_20_39
441 for(;$i<40;$i++)        { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
442 $code.="        movdqa  0x20($Tbl),$K\n";       # K_40_59
443 for(;$i<60;$i++)        { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
444 $code.="        movdqa  0x40($Tbl),$K\n";       # K_60_79
445 for(;$i<80;$i++)        { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
446 $code.=<<___;
447         movdqa  (%rbx),@Xi[0]                   # pull counters
448         mov     \$1,%ecx
449         cmp     4*0(%rbx),%ecx                  # examine counters
450         pxor    $t2,$t2
451         cmovge  $Tbl,@ptr[0]                    # cancel input
452         cmp     4*1(%rbx),%ecx
453         movdqa  @Xi[0],@Xi[1]
454         cmovge  $Tbl,@ptr[1]
455         cmp     4*2(%rbx),%ecx
456         pcmpgtd $t2,@Xi[1]                      # mask value
457         cmovge  $Tbl,@ptr[2]
458         cmp     4*3(%rbx),%ecx
459         paddd   @Xi[1],@Xi[0]                   # counters--
460         cmovge  $Tbl,@ptr[3]
461
462         movdqu  0x00($ctx),$t0
463         pand    @Xi[1],$A
464         movdqu  0x20($ctx),$t1
465         pand    @Xi[1],$B
466         paddd   $t0,$A
467         movdqu  0x40($ctx),$t2
468         pand    @Xi[1],$C
469         paddd   $t1,$B
470         movdqu  0x60($ctx),$t3
471         pand    @Xi[1],$D
472         paddd   $t2,$C
473         movdqu  0x80($ctx),$tx
474         pand    @Xi[1],$E
475         movdqu  $A,0x00($ctx)
476         paddd   $t3,$D
477         movdqu  $B,0x20($ctx)
478         paddd   $tx,$E
479         movdqu  $C,0x40($ctx)
480         movdqu  $D,0x60($ctx)
481         movdqu  $E,0x80($ctx)
482
483         movdqa  @Xi[0],(%rbx)                   # save counters
484         movdqa  0x60($Tbl),$tx                  # pbswap_mask
485         movdqa  -0x20($Tbl),$K                  # K_00_19
486         dec     $num
487         jnz     .Loop
488
489         mov     `$REG_SZ*17+8`(%rsp),$num
490         lea     $REG_SZ($ctx),$ctx
491         lea     `16*$REG_SZ/4`($inp),$inp
492         dec     $num
493         jnz     .Loop_grande
494
495 .Ldone:
496         mov     `$REG_SZ*17`(%rsp),%rax         # original %rsp
497 .cfi_def_cfa    %rax,8
498 ___
499 $code.=<<___ if ($win64);
500         movaps  -0xb8(%rax),%xmm6
501         movaps  -0xa8(%rax),%xmm7
502         movaps  -0x98(%rax),%xmm8
503         movaps  -0x88(%rax),%xmm9
504         movaps  -0x78(%rax),%xmm10
505         movaps  -0x68(%rax),%xmm11
506         movaps  -0x58(%rax),%xmm12
507         movaps  -0x48(%rax),%xmm13
508         movaps  -0x38(%rax),%xmm14
509         movaps  -0x28(%rax),%xmm15
510 ___
511 $code.=<<___;
512         mov     -16(%rax),%rbp
513 .cfi_restore    %rbp
514         mov     -8(%rax),%rbx
515 .cfi_restore    %rbx
516         lea     (%rax),%rsp
517 .cfi_def_cfa_register   %rsp
518 .Lepilogue:
519         ret
520 .cfi_endproc
521 .size   sha1_multi_block,.-sha1_multi_block
522 ___
523                                                 {{{
524 my ($ABCD0,$E0,$E0_,$BSWAP,$ABCD1,$E1,$E1_)=map("%xmm$_",(0..3,8..10));
525 my @MSG0=map("%xmm$_",(4..7));
526 my @MSG1=map("%xmm$_",(11..14));
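# The SHA-NI path keeps two independent streams in flight: stream 0 in
# $ABCD0/$E0 and stream 1 in $ABCD1/$E1, with the two sha1rnds4/sha1nexte
# dependency chains interleaved so their latencies overlap. On entry the
# context, stored as A[8]..E[8] arrays, is transposed into the packed form
# the SHA instructions expect, and transposed back on exit.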
527
528 $code.=<<___;
529 .type   sha1_multi_block_shaext,\@function,3
530 .align  32
531 sha1_multi_block_shaext:
532 .cfi_startproc
533 _shaext_shortcut:
534         mov     %rsp,%rax
535 .cfi_def_cfa_register   %rax
536         push    %rbx
537 .cfi_push       %rbx
538         push    %rbp
539 .cfi_push       %rbp
540 ___
541 $code.=<<___ if ($win64);
542         lea     -0xa8(%rsp),%rsp
543         movaps  %xmm6,(%rsp)
544         movaps  %xmm7,0x10(%rsp)
545         movaps  %xmm8,0x20(%rsp)
546         movaps  %xmm9,0x30(%rsp)
547         movaps  %xmm10,-0x78(%rax)
548         movaps  %xmm11,-0x68(%rax)
549         movaps  %xmm12,-0x58(%rax)
550         movaps  %xmm13,-0x48(%rax)
551         movaps  %xmm14,-0x38(%rax)
552         movaps  %xmm15,-0x28(%rax)
553 ___
554 $code.=<<___;
555         sub     \$`$REG_SZ*18`,%rsp
556         shl     \$1,$num                        # we process a pair at a time
557         and     \$-256,%rsp
558         lea     0x40($ctx),$ctx                 # size optimization
559         mov     %rax,`$REG_SZ*17`(%rsp)         # original %rsp
560 .Lbody_shaext:
561         lea     `$REG_SZ*16`(%rsp),%rbx
562         movdqa  K_XX_XX+0x80(%rip),$BSWAP       # byte-n-word swap
563
564 .Loop_grande_shaext:
565         mov     $num,`$REG_SZ*17+8`(%rsp)       # original $num
566         xor     $num,$num
567 ___
568 for($i=0;$i<2;$i++) {
569     $code.=<<___;
570         mov     `16*$i+0`($inp),@ptr[$i]        # input pointer
571         mov     `16*$i+8`($inp),%ecx            # number of blocks
572         cmp     $num,%ecx
573         cmovg   %ecx,$num                       # find maximum
574         test    %ecx,%ecx
575         mov     %ecx,`4*$i`(%rbx)               # initialize counters
576         cmovle  %rsp,@ptr[$i]                   # cancel input
577 ___
578 }
579 $code.=<<___;
580         test    $num,$num
581         jz      .Ldone_shaext
582
583         movq            0x00-0x40($ctx),$ABCD0  # a1.a0
584         movq            0x20-0x40($ctx),@MSG0[0]# b1.b0
585         movq            0x40-0x40($ctx),@MSG0[1]# c1.c0
586         movq            0x60-0x40($ctx),@MSG0[2]# d1.d0
587         movq            0x80-0x40($ctx),@MSG0[3]# e1.e0
588
589         punpckldq       @MSG0[0],$ABCD0         # b1.a1.b0.a0
590         punpckldq       @MSG0[2],@MSG0[1]       # d1.c1.d0.c0
591
592         movdqa          $ABCD0,$ABCD1
593         punpcklqdq      @MSG0[1],$ABCD0         # d0.c0.b0.a0
594         punpckhqdq      @MSG0[1],$ABCD1         # d1.c1.b1.a1
595
596         pshufd          \$0b00111111,@MSG0[3],$E0
597         pshufd          \$0b01111111,@MSG0[3],$E1
598         pshufd          \$0b00011011,$ABCD0,$ABCD0
599         pshufd          \$0b00011011,$ABCD1,$ABCD1
600         jmp             .Loop_shaext
601
602 .align  32
603 .Loop_shaext:
604         movdqu          0x00(@ptr[0]),@MSG0[0]
605          movdqu         0x00(@ptr[1]),@MSG1[0]
606         movdqu          0x10(@ptr[0]),@MSG0[1]
607          movdqu         0x10(@ptr[1]),@MSG1[1]
608         movdqu          0x20(@ptr[0]),@MSG0[2]
609         pshufb          $BSWAP,@MSG0[0]
610          movdqu         0x20(@ptr[1]),@MSG1[2]
611          pshufb         $BSWAP,@MSG1[0]
612         movdqu          0x30(@ptr[0]),@MSG0[3]
613         lea             0x40(@ptr[0]),@ptr[0]
614         pshufb          $BSWAP,@MSG0[1]
615          movdqu         0x30(@ptr[1]),@MSG1[3]
616          lea            0x40(@ptr[1]),@ptr[1]
617          pshufb         $BSWAP,@MSG1[1]
618
619         movdqa          $E0,0x50(%rsp)          # offload
620         paddd           @MSG0[0],$E0
621          movdqa         $E1,0x70(%rsp)
622          paddd          @MSG1[0],$E1
623         movdqa          $ABCD0,0x40(%rsp)       # offload
624         movdqa          $ABCD0,$E0_
625          movdqa         $ABCD1,0x60(%rsp)
626          movdqa         $ABCD1,$E1_
627         sha1rnds4       \$0,$E0,$ABCD0          # 0-3
628         sha1nexte       @MSG0[1],$E0_
629          sha1rnds4      \$0,$E1,$ABCD1          # 0-3
630          sha1nexte      @MSG1[1],$E1_
631         pshufb          $BSWAP,@MSG0[2]
632         prefetcht0      127(@ptr[0])
633         sha1msg1        @MSG0[1],@MSG0[0]
634          pshufb         $BSWAP,@MSG1[2]
635          prefetcht0     127(@ptr[1])
636          sha1msg1       @MSG1[1],@MSG1[0]
637
638         pshufb          $BSWAP,@MSG0[3]
639         movdqa          $ABCD0,$E0
640          pshufb         $BSWAP,@MSG1[3]
641          movdqa         $ABCD1,$E1
642         sha1rnds4       \$0,$E0_,$ABCD0         # 4-7
643         sha1nexte       @MSG0[2],$E0
644          sha1rnds4      \$0,$E1_,$ABCD1         # 4-7
645          sha1nexte      @MSG1[2],$E1
646         pxor            @MSG0[2],@MSG0[0]
647         sha1msg1        @MSG0[2],@MSG0[1]
648          pxor           @MSG1[2],@MSG1[0]
649          sha1msg1       @MSG1[2],@MSG1[1]
650 ___
651 for($i=2;$i<20-4;$i++) {
652 $code.=<<___;
653         movdqa          $ABCD0,$E0_
654          movdqa         $ABCD1,$E1_
655         sha1rnds4       \$`int($i/5)`,$E0,$ABCD0        # 8-11
656         sha1nexte       @MSG0[3],$E0_
657          sha1rnds4      \$`int($i/5)`,$E1,$ABCD1        # 8-11
658          sha1nexte      @MSG1[3],$E1_
659         sha1msg2        @MSG0[3],@MSG0[0]
660          sha1msg2       @MSG1[3],@MSG1[0]
661         pxor            @MSG0[3],@MSG0[1]
662         sha1msg1        @MSG0[3],@MSG0[2]
663          pxor           @MSG1[3],@MSG1[1]
664          sha1msg1       @MSG1[3],@MSG1[2]
665 ___
666         ($E0,$E0_)=($E0_,$E0);          ($E1,$E1_)=($E1_,$E1);
667         push(@MSG0,shift(@MSG0));       push(@MSG1,shift(@MSG1));
668 }
669 $code.=<<___;
670         movdqa          $ABCD0,$E0_
671          movdqa         $ABCD1,$E1_
672         sha1rnds4       \$3,$E0,$ABCD0          # 64-67
673         sha1nexte       @MSG0[3],$E0_
674          sha1rnds4      \$3,$E1,$ABCD1          # 64-67
675          sha1nexte      @MSG1[3],$E1_
676         sha1msg2        @MSG0[3],@MSG0[0]
677          sha1msg2       @MSG1[3],@MSG1[0]
678         pxor            @MSG0[3],@MSG0[1]
679          pxor           @MSG1[3],@MSG1[1]
680
681         mov             \$1,%ecx
682         pxor            @MSG0[2],@MSG0[2]       # zero
683         cmp             4*0(%rbx),%ecx          # examine counters
684         cmovge          %rsp,@ptr[0]            # cancel input
685
686         movdqa          $ABCD0,$E0
687          movdqa         $ABCD1,$E1
688         sha1rnds4       \$3,$E0_,$ABCD0         # 68-71
689         sha1nexte       @MSG0[0],$E0
690          sha1rnds4      \$3,$E1_,$ABCD1         # 68-71
691          sha1nexte      @MSG1[0],$E1
692         sha1msg2        @MSG0[0],@MSG0[1]
693          sha1msg2       @MSG1[0],@MSG1[1]
694
695         cmp             4*1(%rbx),%ecx
696         cmovge          %rsp,@ptr[1]
697         movq            (%rbx),@MSG0[0]         # pull counters
698
699         movdqa          $ABCD0,$E0_
700          movdqa         $ABCD1,$E1_
701         sha1rnds4       \$3,$E0,$ABCD0          # 72-75
702         sha1nexte       @MSG0[1],$E0_
703          sha1rnds4      \$3,$E1,$ABCD1          # 72-75
704          sha1nexte      @MSG1[1],$E1_
705
706         pshufd          \$0x00,@MSG0[0],@MSG1[2]
707         pshufd          \$0x55,@MSG0[0],@MSG1[3]
708         movdqa          @MSG0[0],@MSG0[1]
709         pcmpgtd         @MSG0[2],@MSG1[2]
710         pcmpgtd         @MSG0[2],@MSG1[3]
711
712         movdqa          $ABCD0,$E0
713          movdqa         $ABCD1,$E1
714         sha1rnds4       \$3,$E0_,$ABCD0         # 76-79
715         sha1nexte       $MSG0[2],$E0
716          sha1rnds4      \$3,$E1_,$ABCD1         # 76-79
717          sha1nexte      $MSG0[2],$E1
718
719         pcmpgtd         @MSG0[2],@MSG0[1]       # counter mask
720         pand            @MSG1[2],$ABCD0
721         pand            @MSG1[2],$E0
722          pand           @MSG1[3],$ABCD1
723          pand           @MSG1[3],$E1
724         paddd           @MSG0[1],@MSG0[0]       # counters--
725
726         paddd           0x40(%rsp),$ABCD0
727         paddd           0x50(%rsp),$E0
728          paddd          0x60(%rsp),$ABCD1
729          paddd          0x70(%rsp),$E1
730
731         movq            @MSG0[0],(%rbx)         # save counters
732         dec             $num
733         jnz             .Loop_shaext
734
735         mov             `$REG_SZ*17+8`(%rsp),$num
736
737         pshufd          \$0b00011011,$ABCD0,$ABCD0
738         pshufd          \$0b00011011,$ABCD1,$ABCD1
739
740         movdqa          $ABCD0,@MSG0[0]
741         punpckldq       $ABCD1,$ABCD0           # b1.b0.a1.a0
742         punpckhdq       $ABCD1,@MSG0[0]         # d1.d0.c1.c0
743         punpckhdq       $E1,$E0                 # e1.e0.xx.xx
744         movq            $ABCD0,0x00-0x40($ctx)  # a1.a0
745         psrldq          \$8,$ABCD0
746         movq            @MSG0[0],0x40-0x40($ctx)# c1.c0
747         psrldq          \$8,@MSG0[0]
748         movq            $ABCD0,0x20-0x40($ctx)  # b1.b0
749         psrldq          \$8,$E0
750         movq            @MSG0[0],0x60-0x40($ctx)# d1.d0
751         movq            $E0,0x80-0x40($ctx)     # e1.e0
752
753         lea     `$REG_SZ/2`($ctx),$ctx
754         lea     `16*2`($inp),$inp
755         dec     $num
756         jnz     .Loop_grande_shaext
757
758 .Ldone_shaext:
759         #mov    `$REG_SZ*17`(%rsp),%rax         # original %rsp
760 ___
761 $code.=<<___ if ($win64);
762         movaps  -0xb8(%rax),%xmm6
763         movaps  -0xa8(%rax),%xmm7
764         movaps  -0x98(%rax),%xmm8
765         movaps  -0x88(%rax),%xmm9
766         movaps  -0x78(%rax),%xmm10
767         movaps  -0x68(%rax),%xmm11
768         movaps  -0x58(%rax),%xmm12
769         movaps  -0x48(%rax),%xmm13
770         movaps  -0x38(%rax),%xmm14
771         movaps  -0x28(%rax),%xmm15
772 ___
773 $code.=<<___;
774         mov     -16(%rax),%rbp
775 .cfi_restore    %rbp
776         mov     -8(%rax),%rbx
777 .cfi_restore    %rbx
778         lea     (%rax),%rsp
779 .cfi_def_cfa_register   %rsp
780 .Lepilogue_shaext:
781         ret
782 .cfi_endproc
783 .size   sha1_multi_block_shaext,.-sha1_multi_block_shaext
784 ___
785                                                 }}}
786
787                                                 if ($avx) {{{
788 sub BODY_00_19_avx {
789 my ($i,$a,$b,$c,$d,$e)=@_;
790 my $j=$i+1;
791 my $k=$i+2;
792 my $vpack = $REG_SZ==16 ? "vpunpckldq" : "vinserti128";
793 my $ptr_n = $REG_SZ==16 ? @ptr[1] : @ptr[4];
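# The same round bodies serve both the 4-way AVX code (xmm registers,
# $REG_SZ==16) and the 8-way AVX2 code (ymm registers, $REG_SZ==32);
# $vpack and $ptr_n abstract the parts of the word-gathering step that
# differ between the two.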
794
795 $code.=<<___ if ($i==0 && $REG_SZ==16);
796         vmovd           (@ptr[0]),@Xi[0]
797          lea            `16*4`(@ptr[0]),@ptr[0]
798         vmovd           (@ptr[1]),@Xi[2]        # borrow Xi[2]
799          lea            `16*4`(@ptr[1]),@ptr[1]
800         vpinsrd         \$1,(@ptr[2]),@Xi[0],@Xi[0]
801          lea            `16*4`(@ptr[2]),@ptr[2]
802         vpinsrd         \$1,(@ptr[3]),@Xi[2],@Xi[2]
803          lea            `16*4`(@ptr[3]),@ptr[3]
804          vmovd          `4*$j-16*4`(@ptr[0]),@Xi[1]
805         vpunpckldq      @Xi[2],@Xi[0],@Xi[0]
806          vmovd          `4*$j-16*4`($ptr_n),$t3
807         vpshufb         $tx,@Xi[0],@Xi[0]
808 ___
809 $code.=<<___ if ($i<15 && $REG_SZ==16);         # just load input
810          vpinsrd        \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
811          vpinsrd        \$1,`4*$j-16*4`(@ptr[3]),$t3,$t3
812 ___
813 $code.=<<___ if ($i==0 && $REG_SZ==32);
814         vmovd           (@ptr[0]),@Xi[0]
815          lea            `16*4`(@ptr[0]),@ptr[0]
816         vmovd           (@ptr[4]),@Xi[2]        # borrow Xi[2]
817          lea            `16*4`(@ptr[4]),@ptr[4]
818         vmovd           (@ptr[1]),$t2
819          lea            `16*4`(@ptr[1]),@ptr[1]
820         vmovd           (@ptr[5]),$t1
821          lea            `16*4`(@ptr[5]),@ptr[5]
822         vpinsrd         \$1,(@ptr[2]),@Xi[0],@Xi[0]
823          lea            `16*4`(@ptr[2]),@ptr[2]
824         vpinsrd         \$1,(@ptr[6]),@Xi[2],@Xi[2]
825          lea            `16*4`(@ptr[6]),@ptr[6]
826         vpinsrd         \$1,(@ptr[3]),$t2,$t2
827          lea            `16*4`(@ptr[3]),@ptr[3]
828         vpunpckldq      $t2,@Xi[0],@Xi[0]
829         vpinsrd         \$1,(@ptr[7]),$t1,$t1
830          lea            `16*4`(@ptr[7]),@ptr[7]
831         vpunpckldq      $t1,@Xi[2],@Xi[2]
832          vmovd          `4*$j-16*4`(@ptr[0]),@Xi[1]
833         vinserti128     @Xi[2],@Xi[0],@Xi[0]
834          vmovd          `4*$j-16*4`($ptr_n),$t3
835         vpshufb         $tx,@Xi[0],@Xi[0]
836 ___
837 $code.=<<___ if ($i<15 && $REG_SZ==32);         # just load input
838          vmovd          `4*$j-16*4`(@ptr[1]),$t2
839          vmovd          `4*$j-16*4`(@ptr[5]),$t1
840          vpinsrd        \$1,`4*$j-16*4`(@ptr[2]),@Xi[1],@Xi[1]
841          vpinsrd        \$1,`4*$j-16*4`(@ptr[6]),$t3,$t3
842          vpinsrd        \$1,`4*$j-16*4`(@ptr[3]),$t2,$t2
843          vpunpckldq     $t2,@Xi[1],@Xi[1]
844          vpinsrd        \$1,`4*$j-16*4`(@ptr[7]),$t1,$t1
845          vpunpckldq     $t1,$t3,$t3
846 ___
847 $code.=<<___ if ($i<14);
848         vpaddd  $K,$e,$e                        # e+=K_00_19
849         vpslld  \$5,$a,$t2
850         vpandn  $d,$b,$t1
851         vpand   $c,$b,$t0
852
853         vmovdqa @Xi[0],`&Xi_off($i)`
854         vpaddd  @Xi[0],$e,$e                    # e+=X[i]
855          $vpack         $t3,@Xi[1],@Xi[1]
856         vpsrld  \$27,$a,$t3
857         vpxor   $t1,$t0,$t0                     # Ch(b,c,d)
858          vmovd          `4*$k-16*4`(@ptr[0]),@Xi[2]
859
860         vpslld  \$30,$b,$t1
861         vpor    $t3,$t2,$t2                     # rol(a,5)
862          vmovd          `4*$k-16*4`($ptr_n),$t3
863         vpaddd  $t0,$e,$e                       # e+=Ch(b,c,d)
864
865         vpsrld  \$2,$b,$b
866         vpaddd  $t2,$e,$e                       # e+=rol(a,5)
867          vpshufb        $tx,@Xi[1],@Xi[1]
868         vpor    $t1,$b,$b                       # b=rol(b,30)
869 ___
870 $code.=<<___ if ($i==14);
871         vpaddd  $K,$e,$e                        # e+=K_00_19
872          prefetcht0     63(@ptr[0])
873         vpslld  \$5,$a,$t2
874         vpandn  $d,$b,$t1
875         vpand   $c,$b,$t0
876
877         vmovdqa @Xi[0],`&Xi_off($i)`
878         vpaddd  @Xi[0],$e,$e                    # e+=X[i]
879          $vpack         $t3,@Xi[1],@Xi[1]
880         vpsrld  \$27,$a,$t3
881          prefetcht0     63(@ptr[1])
882         vpxor   $t1,$t0,$t0                     # Ch(b,c,d)
883
884         vpslld  \$30,$b,$t1
885         vpor    $t3,$t2,$t2                     # rol(a,5)
886          prefetcht0     63(@ptr[2])
887         vpaddd  $t0,$e,$e                       # e+=Ch(b,c,d)
888
889         vpsrld  \$2,$b,$b
890         vpaddd  $t2,$e,$e                       # e+=rol(a,5)
891          prefetcht0     63(@ptr[3])
892          vpshufb        $tx,@Xi[1],@Xi[1]
893         vpor    $t1,$b,$b                       # b=rol(b,30)
894 ___
895 $code.=<<___ if ($i>=13 && $i<15);
896         vmovdqa `&Xi_off($j+2)`,@Xi[3]          # preload "X[2]"
897 ___
898 $code.=<<___ if ($i>=15);                       # apply Xupdate
899         vpxor   @Xi[-2],@Xi[1],@Xi[1]           # "X[13]"
900         vmovdqa `&Xi_off($j+2)`,@Xi[3]          # "X[2]"
901
902         vpaddd  $K,$e,$e                        # e+=K_00_19
903         vpslld  \$5,$a,$t2
904         vpandn  $d,$b,$t1
905          `"prefetcht0   63(@ptr[4])"            if ($i==15 && $REG_SZ==32)`
906         vpand   $c,$b,$t0
907
908         vmovdqa @Xi[0],`&Xi_off($i)`
909         vpaddd  @Xi[0],$e,$e                    # e+=X[i]
910          vpxor  `&Xi_off($j+8)`,@Xi[1],@Xi[1]
911         vpsrld  \$27,$a,$t3
912         vpxor   $t1,$t0,$t0                     # Ch(b,c,d)
913          vpxor  @Xi[3],@Xi[1],@Xi[1]
914          `"prefetcht0   63(@ptr[5])"            if ($i==15 && $REG_SZ==32)`
915
916         vpslld  \$30,$b,$t1
917         vpor    $t3,$t2,$t2                     # rol(a,5)
918         vpaddd  $t0,$e,$e                       # e+=Ch(b,c,d)
919          `"prefetcht0   63(@ptr[6])"            if ($i==15 && $REG_SZ==32)`
920          vpsrld \$31,@Xi[1],$tx
921          vpaddd @Xi[1],@Xi[1],@Xi[1]
922
923         vpsrld  \$2,$b,$b
924          `"prefetcht0   63(@ptr[7])"            if ($i==15 && $REG_SZ==32)`
925         vpaddd  $t2,$e,$e                       # e+=rol(a,5)
926          vpor   $tx,@Xi[1],@Xi[1]               # rol   \$1,@Xi[1]
927         vpor    $t1,$b,$b                       # b=rol(b,30)
928 ___
929 push(@Xi,shift(@Xi));
930 }
931
932 sub BODY_20_39_avx {
933 my ($i,$a,$b,$c,$d,$e)=@_;
934 my $j=$i+1;
935
936 $code.=<<___ if ($i<79);
937         vpxor   @Xi[-2],@Xi[1],@Xi[1]           # "X[13]"
938         vmovdqa `&Xi_off($j+2)`,@Xi[3]          # "X[2]"
939
940         vpslld  \$5,$a,$t2
941         vpaddd  $K,$e,$e                        # e+=K_20_39
942         vpxor   $b,$d,$t0
943 ___
944 $code.=<<___ if ($i<72);
945         vmovdqa @Xi[0],`&Xi_off($i)`
946 ___
947 $code.=<<___ if ($i<79);
948         vpaddd  @Xi[0],$e,$e                    # e+=X[i]
949          vpxor  `&Xi_off($j+8)`,@Xi[1],@Xi[1]
950         vpsrld  \$27,$a,$t3
951         vpxor   $c,$t0,$t0                      # Parity(b,c,d)
952          vpxor  @Xi[3],@Xi[1],@Xi[1]
953
954         vpslld  \$30,$b,$t1
955         vpor    $t3,$t2,$t2                     # rol(a,5)
956         vpaddd  $t0,$e,$e                       # e+=Parity(b,c,d)
957          vpsrld \$31,@Xi[1],$tx
958          vpaddd @Xi[1],@Xi[1],@Xi[1]
959
960         vpsrld  \$2,$b,$b
961         vpaddd  $t2,$e,$e                       # e+=rol(a,5)
962          vpor   $tx,@Xi[1],@Xi[1]               # rol(@Xi[1],1)
963         vpor    $t1,$b,$b                       # b=rol(b,30)
964 ___
965 $code.=<<___ if ($i==79);
966         vpslld  \$5,$a,$t2
967         vpaddd  $K,$e,$e                        # e+=K_20_39
968         vpxor   $b,$d,$t0
969
970         vpsrld  \$27,$a,$t3
971         vpaddd  @Xi[0],$e,$e                    # e+=X[i]
972         vpxor   $c,$t0,$t0                      # Parity(b,c,d)
973
974         vpslld  \$30,$b,$t1
975         vpor    $t3,$t2,$t2                     # rol(a,5)
976         vpaddd  $t0,$e,$e                       # e+=Parity(b,c,d)
977
978         vpsrld  \$2,$b,$b
979         vpaddd  $t2,$e,$e                       # e+=rol(a,5)
980         vpor    $t1,$b,$b                       # b=rol(b,30)
981 ___
982 push(@Xi,shift(@Xi));
983 }
984
985 sub BODY_40_59_avx {
986 my ($i,$a,$b,$c,$d,$e)=@_;
987 my $j=$i+1;
988
989 $code.=<<___;
990         vpxor   @Xi[-2],@Xi[1],@Xi[1]           # "X[13]"
991         vmovdqa `&Xi_off($j+2)`,@Xi[3]          # "X[2]"
992
993         vpaddd  $K,$e,$e                        # e+=K_40_59
994         vpslld  \$5,$a,$t2
995         vpand   $c,$d,$t1
996          vpxor  `&Xi_off($j+8)`,@Xi[1],@Xi[1]
997
998         vpaddd  $t1,$e,$e
999         vpsrld  \$27,$a,$t3
1000         vpxor   $c,$d,$t0
1001          vpxor  @Xi[3],@Xi[1],@Xi[1]
1002
1003         vmovdqu @Xi[0],`&Xi_off($i)`
1004         vpaddd  @Xi[0],$e,$e                    # e+=X[i]
1005         vpor    $t3,$t2,$t2                     # rol(a,5)
1006          vpsrld \$31,@Xi[1],$tx
1007         vpand   $b,$t0,$t0
1008          vpaddd @Xi[1],@Xi[1],@Xi[1]
1009
1010         vpslld  \$30,$b,$t1
1011         vpaddd  $t0,$e,$e                       # e+=Maj(b,d,c)
1012
1013         vpsrld  \$2,$b,$b
1014         vpaddd  $t2,$e,$e                       # e+=rol(a,5)
1015          vpor   $tx,@Xi[1],@Xi[1]               # rol(@X[1],1)
1016         vpor    $t1,$b,$b                       # b=rol(b,30)
1017 ___
1018 push(@Xi,shift(@Xi));
1019 }
1020
1021 $code.=<<___;
1022 .type   sha1_multi_block_avx,\@function,3
1023 .align  32
1024 sha1_multi_block_avx:
1025 .cfi_startproc
1026 _avx_shortcut:
1027 ___
1028 $code.=<<___ if ($avx>1);
1029         shr     \$32,%rcx
1030         cmp     \$2,$num
1031         jb      .Lavx
1032         test    \$`1<<5`,%ecx
1033         jnz     _avx2_shortcut
1034         jmp     .Lavx
1035 .align  32
1036 .Lavx:
1037 ___
1038 $code.=<<___;
1039         mov     %rsp,%rax
1040 .cfi_def_cfa_register   %rax
1041         push    %rbx
1042 .cfi_push       %rbx
1043         push    %rbp
1044 .cfi_push       %rbp
1045 ___
1046 $code.=<<___ if ($win64);
1047         lea     -0xa8(%rsp),%rsp
1048         movaps  %xmm6,(%rsp)
1049         movaps  %xmm7,0x10(%rsp)
1050         movaps  %xmm8,0x20(%rsp)
1051         movaps  %xmm9,0x30(%rsp)
1052         movaps  %xmm10,-0x78(%rax)
1053         movaps  %xmm11,-0x68(%rax)
1054         movaps  %xmm12,-0x58(%rax)
1055         movaps  %xmm13,-0x48(%rax)
1056         movaps  %xmm14,-0x38(%rax)
1057         movaps  %xmm15,-0x28(%rax)
1058 ___
1059 $code.=<<___;
1060         sub     \$`$REG_SZ*18`, %rsp
1061         and     \$-256,%rsp
1062         mov     %rax,`$REG_SZ*17`(%rsp)         # original %rsp
1063 .cfi_cfa_expression     %rsp+`$REG_SZ*17`,deref,+8
1064 .Lbody_avx:
1065         lea     K_XX_XX(%rip),$Tbl
1066         lea     `$REG_SZ*16`(%rsp),%rbx
1067
1068         vzeroupper
1069 .Loop_grande_avx:
1070         mov     $num,`$REG_SZ*17+8`(%rsp)       # original $num
1071         xor     $num,$num
1072 ___
1073 for($i=0;$i<4;$i++) {
1074     $code.=<<___;
1075         mov     `16*$i+0`($inp),@ptr[$i]        # input pointer
1076         mov     `16*$i+8`($inp),%ecx            # number of blocks
1077         cmp     $num,%ecx
1078         cmovg   %ecx,$num                       # find maximum
1079         test    %ecx,%ecx
1080         mov     %ecx,`4*$i`(%rbx)               # initialize counters
1081         cmovle  $Tbl,@ptr[$i]                   # cancel input
1082 ___
1083 }
1084 $code.=<<___;
1085         test    $num,$num
1086         jz      .Ldone_avx
1087
1088         vmovdqu 0x00($ctx),$A                   # load context
1089          lea    128(%rsp),%rax
1090         vmovdqu 0x20($ctx),$B
1091         vmovdqu 0x40($ctx),$C
1092         vmovdqu 0x60($ctx),$D
1093         vmovdqu 0x80($ctx),$E
1094         vmovdqu 0x60($Tbl),$tx                  # pbswap_mask
1095         jmp     .Loop_avx
1096
1097 .align  32
1098 .Loop_avx:
1099 ___
1100 $code.="        vmovdqa -0x20($Tbl),$K\n";      # K_00_19
1101 for($i=0;$i<20;$i++)    { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
1102 $code.="        vmovdqa 0x00($Tbl),$K\n";       # K_20_39
1103 for(;$i<40;$i++)        { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1104 $code.="        vmovdqa 0x20($Tbl),$K\n";       # K_40_59
1105 for(;$i<60;$i++)        { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
1106 $code.="        vmovdqa 0x40($Tbl),$K\n";       # K_60_79
1107 for(;$i<80;$i++)        { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1108 $code.=<<___;
1109         mov     \$1,%ecx
1110 ___
1111 for($i=0;$i<4;$i++) {
1112     $code.=<<___;
1113         cmp     `4*$i`(%rbx),%ecx               # examine counters
1114         cmovge  $Tbl,@ptr[$i]                   # cancel input
1115 ___
1116 }
1117 $code.=<<___;
1118         vmovdqu (%rbx),$t0                      # pull counters
1119         vpxor   $t2,$t2,$t2
1120         vmovdqa $t0,$t1
1121         vpcmpgtd $t2,$t1,$t1                    # mask value
1122         vpaddd  $t1,$t0,$t0                     # counters--
1123
1124         vpand   $t1,$A,$A
1125         vpand   $t1,$B,$B
1126         vpaddd  0x00($ctx),$A,$A
1127         vpand   $t1,$C,$C
1128         vpaddd  0x20($ctx),$B,$B
1129         vpand   $t1,$D,$D
1130         vpaddd  0x40($ctx),$C,$C
1131         vpand   $t1,$E,$E
1132         vpaddd  0x60($ctx),$D,$D
1133         vpaddd  0x80($ctx),$E,$E
1134         vmovdqu $A,0x00($ctx)
1135         vmovdqu $B,0x20($ctx)
1136         vmovdqu $C,0x40($ctx)
1137         vmovdqu $D,0x60($ctx)
1138         vmovdqu $E,0x80($ctx)
1139
1140         vmovdqu $t0,(%rbx)                      # save counters
1141         vmovdqu 0x60($Tbl),$tx                  # pbswap_mask
1142         dec     $num
1143         jnz     .Loop_avx
1144
1145         mov     `$REG_SZ*17+8`(%rsp),$num
1146         lea     $REG_SZ($ctx),$ctx
1147         lea     `16*$REG_SZ/4`($inp),$inp
1148         dec     $num
1149         jnz     .Loop_grande_avx
1150
1151 .Ldone_avx:
1152         mov     `$REG_SZ*17`(%rsp),%rax         # original %rsp
1153 .cfi_def_cfa    %rax,8
1154         vzeroupper
1155 ___
1156 $code.=<<___ if ($win64);
1157         movaps  -0xb8(%rax),%xmm6
1158         movaps  -0xa8(%rax),%xmm7
1159         movaps  -0x98(%rax),%xmm8
1160         movaps  -0x88(%rax),%xmm9
1161         movaps  -0x78(%rax),%xmm10
1162         movaps  -0x68(%rax),%xmm11
1163         movaps  -0x58(%rax),%xmm12
1164         movaps  -0x48(%rax),%xmm13
1165         movaps  -0x38(%rax),%xmm14
1166         movaps  -0x28(%rax),%xmm15
1167 ___
1168 $code.=<<___;
1169         mov     -16(%rax),%rbp
1170 .cfi_restore    %rbp
1171         mov     -8(%rax),%rbx
1172 .cfi_restore    %rbx
1173         lea     (%rax),%rsp
1174 .cfi_def_cfa_register   %rsp
1175 .Lepilogue_avx:
1176         ret
1177 .cfi_endproc
1178 .size   sha1_multi_block_avx,.-sha1_multi_block_avx
1179 ___
1180
1181                                                 if ($avx>1) {
1182 $code =~ s/\`([^\`]*)\`/eval $1/gem;
1183
1184 $REG_SZ=32;
1185
1186 @ptr=map("%r$_",(12..15,8..11));
1187
1188 @V=($A,$B,$C,$D,$E)=map("%ymm$_",(0..4));
1189 ($t0,$t1,$t2,$t3,$tx)=map("%ymm$_",(5..9));
1190 @Xi=map("%ymm$_",(10..14));
1191 $K="%ymm15";
1192
1193 $code.=<<___;
1194 .type   sha1_multi_block_avx2,\@function,3
1195 .align  32
1196 sha1_multi_block_avx2:
1197 .cfi_startproc
1198 _avx2_shortcut:
1199         mov     %rsp,%rax
1200 .cfi_def_cfa_register   %rax
1201         push    %rbx
1202 .cfi_push       %rbx
1203         push    %rbp
1204 .cfi_push       %rbp
1205         push    %r12
1206 .cfi_push       %r12
1207         push    %r13
1208 .cfi_push       %r13
1209         push    %r14
1210 .cfi_push       %r14
1211         push    %r15
1212 .cfi_push       %r15
1213 ___
1214 $code.=<<___ if ($win64);
1215         lea     -0xa8(%rsp),%rsp
1216         movaps  %xmm6,(%rsp)
1217         movaps  %xmm7,0x10(%rsp)
1218         movaps  %xmm8,0x20(%rsp)
1219         movaps  %xmm9,0x30(%rsp)
1220         movaps  %xmm10,0x40(%rsp)
1221         movaps  %xmm11,0x50(%rsp)
1222         movaps  %xmm12,-0x78(%rax)
1223         movaps  %xmm13,-0x68(%rax)
1224         movaps  %xmm14,-0x58(%rax)
1225         movaps  %xmm15,-0x48(%rax)
1226 ___
1227 $code.=<<___;
1228         sub     \$`$REG_SZ*18`, %rsp
1229         and     \$-256,%rsp
1230         mov     %rax,`$REG_SZ*17`(%rsp)         # original %rsp
1231 .cfi_cfa_expression     %rsp+`$REG_SZ*17`,deref,+8
1232 .Lbody_avx2:
1233         lea     K_XX_XX(%rip),$Tbl
1234         shr     \$1,$num
1235
1236         vzeroupper
1237 .Loop_grande_avx2:
1238         mov     $num,`$REG_SZ*17+8`(%rsp)       # original $num
1239         xor     $num,$num
1240         lea     `$REG_SZ*16`(%rsp),%rbx
1241 ___
1242 for($i=0;$i<8;$i++) {
1243     $code.=<<___;
1244         mov     `16*$i+0`($inp),@ptr[$i]        # input pointer
1245         mov     `16*$i+8`($inp),%ecx            # number of blocks
1246         cmp     $num,%ecx
1247         cmovg   %ecx,$num                       # find maximum
1248         test    %ecx,%ecx
1249         mov     %ecx,`4*$i`(%rbx)               # initialize counters
1250         cmovle  $Tbl,@ptr[$i]                   # cancel input
1251 ___
1252 }
1253 $code.=<<___;
1254         vmovdqu 0x00($ctx),$A                   # load context
1255          lea    128(%rsp),%rax
1256         vmovdqu 0x20($ctx),$B
1257          lea    256+128(%rsp),%rbx
1258         vmovdqu 0x40($ctx),$C
1259         vmovdqu 0x60($ctx),$D
1260         vmovdqu 0x80($ctx),$E
1261         vmovdqu 0x60($Tbl),$tx                  # pbswap_mask
1262         jmp     .Loop_avx2
1263
1264 .align  32
1265 .Loop_avx2:
1266 ___
1267 $code.="        vmovdqa -0x20($Tbl),$K\n";      # K_00_19
1268 for($i=0;$i<20;$i++)    { &BODY_00_19_avx($i,@V); unshift(@V,pop(@V)); }
1269 $code.="        vmovdqa 0x00($Tbl),$K\n";       # K_20_39
1270 for(;$i<40;$i++)        { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1271 $code.="        vmovdqa 0x20($Tbl),$K\n";       # K_40_59
1272 for(;$i<60;$i++)        { &BODY_40_59_avx($i,@V); unshift(@V,pop(@V)); }
1273 $code.="        vmovdqa 0x40($Tbl),$K\n";       # K_60_79
1274 for(;$i<80;$i++)        { &BODY_20_39_avx($i,@V); unshift(@V,pop(@V)); }
1275 $code.=<<___;
1276         mov     \$1,%ecx
1277         lea     `$REG_SZ*16`(%rsp),%rbx
1278 ___
1279 for($i=0;$i<8;$i++) {
1280     $code.=<<___;
1281         cmp     `4*$i`(%rbx),%ecx               # examine counters
1282         cmovge  $Tbl,@ptr[$i]                   # cancel input
1283 ___
1284 }
1285 $code.=<<___;
1286         vmovdqu (%rbx),$t0              # pull counters
1287         vpxor   $t2,$t2,$t2
1288         vmovdqa $t0,$t1
1289         vpcmpgtd $t2,$t1,$t1                    # mask value
1290         vpaddd  $t1,$t0,$t0                     # counters--
1291
1292         vpand   $t1,$A,$A
1293         vpand   $t1,$B,$B
1294         vpaddd  0x00($ctx),$A,$A
1295         vpand   $t1,$C,$C
1296         vpaddd  0x20($ctx),$B,$B
1297         vpand   $t1,$D,$D
1298         vpaddd  0x40($ctx),$C,$C
1299         vpand   $t1,$E,$E
1300         vpaddd  0x60($ctx),$D,$D
1301         vpaddd  0x80($ctx),$E,$E
1302         vmovdqu $A,0x00($ctx)
1303         vmovdqu $B,0x20($ctx)
1304         vmovdqu $C,0x40($ctx)
1305         vmovdqu $D,0x60($ctx)
1306         vmovdqu $E,0x80($ctx)
1307
1308         vmovdqu $t0,(%rbx)                      # save counters
1309         lea     256+128(%rsp),%rbx
1310         vmovdqu 0x60($Tbl),$tx                  # pbswap_mask
1311         dec     $num
1312         jnz     .Loop_avx2
1313
1314         #mov    `$REG_SZ*17+8`(%rsp),$num
1315         #lea    $REG_SZ($ctx),$ctx
1316         #lea    `16*$REG_SZ/4`($inp),$inp
1317         #dec    $num
1318         #jnz    .Loop_grande_avx2
1319
1320 .Ldone_avx2:
1321         mov     `$REG_SZ*17`(%rsp),%rax         # original %rsp
1322 .cfi_def_cfa    %rax,8
1323         vzeroupper
1324 ___
1325 $code.=<<___ if ($win64);
1326         movaps  -0xd8(%rax),%xmm6
1327         movaps  -0xc8(%rax),%xmm7
1328         movaps  -0xb8(%rax),%xmm8
1329         movaps  -0xa8(%rax),%xmm9
1330         movaps  -0x98(%rax),%xmm10
1331         movaps  -0x88(%rax),%xmm11
1332         movaps  -0x78(%rax),%xmm12
1333         movaps  -0x68(%rax),%xmm13
1334         movaps  -0x58(%rax),%xmm14
1335         movaps  -0x48(%rax),%xmm15
1336 ___
1337 $code.=<<___;
1338         mov     -48(%rax),%r15
1339 .cfi_restore    %r15
1340         mov     -40(%rax),%r14
1341 .cfi_restore    %r14
1342         mov     -32(%rax),%r13
1343 .cfi_restore    %r13
1344         mov     -24(%rax),%r12
1345 .cfi_restore    %r12
1346         mov     -16(%rax),%rbp
1347 .cfi_restore    %rbp
1348         mov     -8(%rax),%rbx
1349 .cfi_restore    %rbx
1350         lea     (%rax),%rsp
1351 .cfi_def_cfa_register   %rsp
1352 .Lepilogue_avx2:
1353         ret
1354 .cfi_endproc
1355 .size   sha1_multi_block_avx2,.-sha1_multi_block_avx2
1356 ___
1357                                                 }       }}}
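# Constant table layout: K_00_19 is emitted just *before* the K_XX_XX label,
# which is why the code above fetches it as -0x20($Tbl), while K_20_39,
# K_40_59, K_60_79, the pbswap mask and the SHAEXT byte-swap mask live at
# 0x00, 0x20, 0x40, 0x60 and 0x80($Tbl) respectively.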
1358 $code.=<<___;
1359
1360 .align  256
1361         .long   0x5a827999,0x5a827999,0x5a827999,0x5a827999     # K_00_19
1362         .long   0x5a827999,0x5a827999,0x5a827999,0x5a827999     # K_00_19
1363 K_XX_XX:
1364         .long   0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1     # K_20_39
1365         .long   0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1     # K_20_39
1366         .long   0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc     # K_40_59
1367         .long   0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc     # K_40_59
1368         .long   0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6     # K_60_79
1369         .long   0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6     # K_60_79
1370         .long   0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f     # pbswap
1371         .long   0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f     # pbswap
1372         .byte   0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
1373         .asciz  "SHA1 multi-block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1374 ___
1375
1376 if ($win64) {
1377 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1378 #               CONTEXT *context,DISPATCHER_CONTEXT *disp)
1379 $rec="%rcx";
1380 $frame="%rdx";
1381 $context="%r8";
1382 $disp="%r9";
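# Both handlers recover the original stack pointer that the prologue stashed
# at frame offset 16*17 (32*17 for the AVX2 variant), copy the callee-saved
# GPRs and the ten saved XMM registers back into *context, and then let
# RtlVirtualUnwind continue the unwind.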
1383
1384 $code.=<<___;
1385 .extern __imp_RtlVirtualUnwind
1386 .type   se_handler,\@abi-omnipotent
1387 .align  16
1388 se_handler:
1389         push    %rsi
1390         push    %rdi
1391         push    %rbx
1392         push    %rbp
1393         push    %r12
1394         push    %r13
1395         push    %r14
1396         push    %r15
1397         pushfq
1398         sub     \$64,%rsp
1399
1400         mov     120($context),%rax      # pull context->Rax
1401         mov     248($context),%rbx      # pull context->Rip
1402
1403         mov     8($disp),%rsi           # disp->ImageBase
1404         mov     56($disp),%r11          # disp->HandlerData
1405
1406         mov     0(%r11),%r10d           # HandlerData[0]
1407         lea     (%rsi,%r10),%r10        # end of prologue label
1408         cmp     %r10,%rbx               # context->Rip<.Lbody
1409         jb      .Lin_prologue
1410
1411         mov     152($context),%rax      # pull context->Rsp
1412
1413         mov     4(%r11),%r10d           # HandlerData[1]
1414         lea     (%rsi,%r10),%r10        # epilogue label
1415         cmp     %r10,%rbx               # context->Rip>=.Lepilogue
1416         jae     .Lin_prologue
1417
1418         mov     `16*17`(%rax),%rax      # pull saved stack pointer
1419
1420         mov     -8(%rax),%rbx
1421         mov     -16(%rax),%rbp
1422         mov     %rbx,144($context)      # restore context->Rbx
1423         mov     %rbp,160($context)      # restore context->Rbp
1424
1425         lea     -24-10*16(%rax),%rsi
1426         lea     512($context),%rdi      # &context.Xmm6
1427         mov     \$20,%ecx
1428         .long   0xa548f3fc              # cld; rep movsq
1429
1430 .Lin_prologue:
1431         mov     8(%rax),%rdi
1432         mov     16(%rax),%rsi
1433         mov     %rax,152($context)      # restore context->Rsp
1434         mov     %rsi,168($context)      # restore context->Rsi
1435         mov     %rdi,176($context)      # restore context->Rdi
1436
1437         mov     40($disp),%rdi          # disp->ContextRecord
1438         mov     $context,%rsi           # context
1439         mov     \$154,%ecx              # sizeof(CONTEXT)
1440         .long   0xa548f3fc              # cld; rep movsq
1441
1442         mov     $disp,%rsi
1443         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1444         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1445         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1446         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1447         mov     40(%rsi),%r10           # disp->ContextRecord
1448         lea     56(%rsi),%r11           # &disp->HandlerData
1449         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1450         mov     %r10,32(%rsp)           # arg5
1451         mov     %r11,40(%rsp)           # arg6
1452         mov     %r12,48(%rsp)           # arg7
1453         mov     %rcx,56(%rsp)           # arg8, (NULL)
1454         call    *__imp_RtlVirtualUnwind(%rip)
1455
1456         mov     \$1,%eax                # ExceptionContinueSearch
1457         add     \$64,%rsp
1458         popfq
1459         pop     %r15
1460         pop     %r14
1461         pop     %r13
1462         pop     %r12
1463         pop     %rbp
1464         pop     %rbx
1465         pop     %rdi
1466         pop     %rsi
1467         ret
1468 .size   se_handler,.-se_handler
1469 ___
1470 $code.=<<___ if ($avx>1);
1471 .type   avx2_handler,\@abi-omnipotent
1472 .align  16
1473 avx2_handler:
1474         push    %rsi
1475         push    %rdi
1476         push    %rbx
1477         push    %rbp
1478         push    %r12
1479         push    %r13
1480         push    %r14
1481         push    %r15
1482         pushfq
1483         sub     \$64,%rsp
1484
1485         mov     120($context),%rax      # pull context->Rax
1486         mov     248($context),%rbx      # pull context->Rip
1487
1488         mov     8($disp),%rsi           # disp->ImageBase
1489         mov     56($disp),%r11          # disp->HandlerData
1490
1491         mov     0(%r11),%r10d           # HandlerData[0]
1492         lea     (%rsi,%r10),%r10        # end of prologue label
1493         cmp     %r10,%rbx               # context->Rip<body label
1494         jb      .Lin_prologue
1495
1496         mov     152($context),%rax      # pull context->Rsp
1497
1498         mov     4(%r11),%r10d           # HandlerData[1]
1499         lea     (%rsi,%r10),%r10        # epilogue label
1500         cmp     %r10,%rbx               # context->Rip>=epilogue label
1501         jae     .Lin_prologue
1502
1503         mov     `32*17`(%rax),%rax      # pull saved stack pointer
1504
1505         mov     -8(%rax),%rbx
1506         mov     -16(%rax),%rbp
1507         mov     -24(%rax),%r12
1508         mov     -32(%rax),%r13
1509         mov     -40(%rax),%r14
1510         mov     -48(%rax),%r15
1511         mov     %rbx,144($context)      # restore context->Rbx
1512         mov     %rbp,160($context)      # restore context->Rbp
1513         mov     %r12,216($context)      # restore context->R12
1514         mov     %r13,224($context)      # restore context->R13
1515         mov     %r14,232($context)      # restore context->R14
1516         mov     %r15,240($context)      # restore context->R15
1517
1518         lea     -56-10*16(%rax),%rsi
1519         lea     512($context),%rdi      # &context.Xmm6
1520         mov     \$20,%ecx
1521         .long   0xa548f3fc              # cld; rep movsq
1522
1523         jmp     .Lin_prologue
1524 .size   avx2_handler,.-avx2_handler
1525 ___
1526 $code.=<<___;
1527 .section        .pdata
1528 .align  4
1529         .rva    .LSEH_begin_sha1_multi_block
1530         .rva    .LSEH_end_sha1_multi_block
1531         .rva    .LSEH_info_sha1_multi_block
1532         .rva    .LSEH_begin_sha1_multi_block_shaext
1533         .rva    .LSEH_end_sha1_multi_block_shaext
1534         .rva    .LSEH_info_sha1_multi_block_shaext
1535 ___
1536 $code.=<<___ if ($avx);
1537         .rva    .LSEH_begin_sha1_multi_block_avx
1538         .rva    .LSEH_end_sha1_multi_block_avx
1539         .rva    .LSEH_info_sha1_multi_block_avx
1540 ___
1541 $code.=<<___ if ($avx>1);
1542         .rva    .LSEH_begin_sha1_multi_block_avx2
1543         .rva    .LSEH_end_sha1_multi_block_avx2
1544         .rva    .LSEH_info_sha1_multi_block_avx2
1545 ___
1546 $code.=<<___;
1547 .section        .xdata
1548 .align  8
1549 .LSEH_info_sha1_multi_block:
1550         .byte   9,0,0,0
1551         .rva    se_handler
1552         .rva    .Lbody,.Lepilogue                       # HandlerData[]
1553 .LSEH_info_sha1_multi_block_shaext:
1554         .byte   9,0,0,0
1555         .rva    se_handler
1556         .rva    .Lbody_shaext,.Lepilogue_shaext # HandlerData[]
1557 ___
1558 $code.=<<___ if ($avx);
1559 .LSEH_info_sha1_multi_block_avx:
1560         .byte   9,0,0,0
1561         .rva    se_handler
1562         .rva    .Lbody_avx,.Lepilogue_avx               # HandlerData[]
1563 ___
1564 $code.=<<___ if ($avx>1);
1565 .LSEH_info_sha1_multi_block_avx2:
1566         .byte   9,0,0,0
1567         .rva    avx2_handler
1568         .rva    .Lbody_avx2,.Lepilogue_avx2             # HandlerData[]
1569 ___
1570 }
1571 ####################################################################
1572
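# sha1rnds4() and sha1op38() below hand-assemble the SHA-NI instructions:
# register-register forms are emitted as raw .byte sequences so the module
# builds even with assemblers that know nothing about the SHA extensions;
# anything else is passed through verbatim.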
1573 sub rex {
1574   local *opcode=shift;
1575   my ($dst,$src)=@_;
1576   my $rex=0;
1577
1578     $rex|=0x04                  if ($dst>=8);
1579     $rex|=0x01                  if ($src>=8);
1580     unshift @opcode,$rex|0x40   if ($rex);
1581 }
1582
1583 sub sha1rnds4 {
1584     if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
1585       my @opcode=(0x0f,0x3a,0xcc);
1586         rex(\@opcode,$3,$2);
1587         push @opcode,0xc0|($2&7)|(($3&7)<<3);           # ModR/M
1588         my $c=$1;
1589         push @opcode,$c=~/^0/?oct($c):$c;
1590         return ".byte\t".join(',',@opcode);
1591     } else {
1592         return "sha1rnds4\t".@_[0];
1593     }
1594 }
1595
1596 sub sha1op38 {
1597     my $instr = shift;
1598     my %opcodelet = (
1599                 "sha1nexte" => 0xc8,
1600                 "sha1msg1"  => 0xc9,
1601                 "sha1msg2"  => 0xca     );
1602
1603     if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
1604       my @opcode=(0x0f,0x38);
1605         rex(\@opcode,$2,$1);
1606         push @opcode,$opcodelet{$instr};
1607         push @opcode,0xc0|($1&7)|(($2&7)<<3);           # ModR/M
1608         return ".byte\t".join(',',@opcode);
1609     } else {
1610         return $instr."\t".@_[0];
1611     }
1612 }
1613
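# Finally, post-process the generated code: evaluate the remaining `...`
# expressions, replace SHA-NI mnemonics with their .byte encodings, and,
# for the AVX2 flavour, rewrite %ymm operands of the handful of instructions
# that only accept %xmm in the forms used here (e.g. vmovd/vmovq, vpinsrd,
# vpextrd, vinserti128, vpbroadcast).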
1614 foreach (split("\n",$code)) {
1615         s/\`([^\`]*)\`/eval($1)/ge;
1616
1617         s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo                or
1618         s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo             or
1619
1620         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1621         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1622         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+),%ymm([0-9]+)/$1$2%xmm$3,%xmm$4/go    or
1623         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1624         s/\b(vinserti128)\b(\s+)%ymm/$1$2\$1,%xmm/go            or
1625         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1626
1627         print $_,"\n";
1628 }
1629
1630 close STDOUT;