ARMv8 assembly pack: add Samsung Mongoose results.
crypto/sha/asm/sha512-armv8.pl
#! /usr/bin/env perl
# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256/512 for ARMv8.
#
# Performance in cycles per processed byte and improvement coefficient
# over code generated with "default" compiler:
#
#               SHA256-hw       SHA256(*)       SHA512
# Apple A7      1.97            10.5 (+33%)     6.73 (-1%(**))
# Cortex-A53    2.38            15.5 (+115%)    10.0 (+150%(***))
# Cortex-A57    2.31            11.6 (+86%)     7.51 (+260%(***))
# Denver        2.01            10.5 (+26%)     6.70 (+8%)
# X-Gene                        20.0 (+100%)    12.8 (+300%(***))
# Mongoose      2.36            13.0 (+50%)     8.36 (+33%)
#
# (*)   Software SHA256 results are of lesser relevance, presented
#       mostly for informational purposes.
# (**)  The result is a trade-off: it's possible to improve it by
#       10% (or by 1 cycle per round), but at the cost of a 20% loss
#       on Cortex-A53 (or by 4 cycles per round).
# (***) Super-impressive coefficients over gcc-generated code are an
#       indication of some compiler "pathology"; most notably, code
#       generated with -mgeneral-regs-only is significantly faster
#       and the gap is only 40-90%.

$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
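
# Everything printed below is piped through the arm-xlate.pl assembler
# translator; a typical invocation (an assumption, shown only as an
# example of the two positional arguments) would be:
#
#	perl sha512-armv8.pl linux64 sha512-armv8.S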

if ($output =~ /512/) {
	$BITS=512;
	$SZ=8;
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1,  8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
	$reg_t="x";
} else {
	$BITS=256;
	$SZ=4;
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
	$reg_t="w";
}
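
# The @Sigma/@sigma arrays hold the rotate and shift amounts of the
# FIPS 180-4 big- and small-sigma functions for the selected digest,
# e.g. Sigma0(x) = ror(x,28)^ror(x,34)^ror(x,39) for SHA-512, while
# $reg_t picks 64-bit "x" or 32-bit "w" general-purpose registers.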

$func="sha${BITS}_block_data_order";

($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));

@X=map("$reg_t$_",(3..15,0..2));
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));

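# Note that $Ktbl lives in x30, the link register, which is free for
# scratch use between the prologue's stp of x29/x30 and the epilogue's
# reloading ldp.
#
# For reference, each BODY_00_xx invocation below emits one round of
# the FIPS 180-4 compression recurrence
#
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
#	T2 = Sigma0(a) + Maj(a,b,c)
#	d += T1;  h = T1 + T2
#
# with Ch(e,f,g) computed as (f&e)|(g&~e), and Maj(a,b,c) as
# ((a^b)&(b^c))^b, so that each round's a^b can be recycled as the
# next round's b^c.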
sub BODY_00_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
my $j=($i+1)&15;
my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
   $T0=@X[$i+3] if ($i<11);

$code.=<<___	if ($i<16);
#ifndef	__ARMEB__
	rev	@X[$i],@X[$i]			// $i
#endif
___
$code.=<<___	if ($i<13 && ($i&1));
	ldp	@X[$i+1],@X[$i+2],[$inp],#2*$SZ
___
$code.=<<___	if ($i==13);
	ldp	@X[14],@X[15],[$inp]
___
$code.=<<___	if ($i>=14);
	ldr	@X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
___
$code.=<<___	if ($i>0 && $i<16);
	add	$a,$a,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=11);
	str	@X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
___
# While ARMv8 specifies a merged rotate-and-logical operation such as
# 'eor x,y,z,ror#n', it was found to negatively affect performance
# on Apple A7. The reason seems to be that it requires even 'y' to
# be available earlier. This means that such a merged instruction is
# not necessarily the best choice on the critical path... On the other
# hand, Cortex-A5x handles merged instructions much better than
# disjoint rotate and logical ones... See the (**) footnote above.
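#
# As a schematic illustration (register names arbitrary), the merged
# form
#
#	eor	x0,x1,x2,ror#14		// wants x1 at issue time
#
# is traded on the critical path for the split sequence
#
#	ror	x3,x2,#14
#	eor	x0,x1,x3		// x1 needed one instruction later
#
# which relaxes the dependency on x1 at the cost of an extra
# instruction.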
$code.=<<___	if ($i<15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	eor	$T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
	and	$t1,$f,$e
	bic	$t2,$g,$e
	add	$h,$h,@X[$i&15]			// h+=X[i]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$T0,ror#$Sigma1[1]	// Sigma1(e)
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	eor	$t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
	add	$h,$h,$t0			// h+=Sigma1(e)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	add	$d,$d,$h			// d+=h
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$t1,ror#$Sigma0[1]	// Sigma0(a)
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	//add	$h,$h,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	ror	$T1,@X[($j+1)&15],#$sigma0[0]
	and	$t1,$f,$e
	ror	$T2,@X[($j+14)&15],#$sigma1[0]
	bic	$t2,$g,$e
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,@X[$i&15]			// h+=X[i]
	eor	$t0,$t0,$e,ror#$Sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$e,ror#$Sigma1[2]	// Sigma1(e)
	eor	$T0,$T0,$a,ror#$Sigma0[1]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	eor	$T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]	// sigma0(X[i+1])
	add	$h,$h,$t0			// h+=Sigma1(e)
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$a,ror#$Sigma0[2]	// Sigma0(a)
	eor	$T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]	// sigma1(X[i+14])
	add	@X[$j],@X[$j],@X[($j+9)&15]
	add	$d,$d,$h			// d+=h
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	add	@X[$j],@X[$j],$T1
	add	$h,$h,$t1			// h+=Sigma0(a)
	add	@X[$j],@X[$j],$T2
___
	($t2,$t3)=($t3,$t2);
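	# $t2/$t3 swap roles every round: the value just loaded with
	# "ldr ...,[$Ktbl],#$SZ" is K[i] of the next round, and the
	# freshly computed a^b serves there as b^c.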
}

$code.=<<___;
#include "arm_arch.h"

.text

.extern	OPENSSL_armcap_P
.globl	$func
.type	$func,%function
.align	6
$func:
___
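# For SHA-256, probe the OPENSSL_armcap_P capability word at run time
# and divert to the SHA-2-extension code path further down when the
# ARMV8_SHA256 bit is set. The offset of OPENSSL_armcap_P relative to
# the probe (rather than its absolute address) is stored so that the
# module stays position-independent.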
$code.=<<___	if ($SZ==4);
#ifdef	__ILP32__
	ldrsw	x16,.LOPENSSL_armcap_P
#else
	ldr	x16,.LOPENSSL_armcap_P
#endif
	adr	x17,.LOPENSSL_armcap_P
	add	x16,x16,x17
	ldr	w16,[x16]
	tst	w16,#ARMV8_SHA256
	b.ne	.Lv8_entry
___
$code.=<<___;
	stp	x29,x30,[sp,#-128]!
	add	x29,sp,#0

	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#4*$SZ

	ldp	$A,$B,[$ctx]				// load context
	ldp	$C,$D,[$ctx,#2*$SZ]
	ldp	$E,$F,[$ctx,#4*$SZ]
	add	$num,$inp,$num,lsl#`log(16*$SZ)/log(2)`	// end of input
	ldp	$G,$H,[$ctx,#6*$SZ]
	adr	$Ktbl,.LK$BITS
	stp	$ctx,$num,[x29,#96]

.Loop:
	ldp	@X[0],@X[1],[$inp],#2*$SZ
	ldr	$t2,[$Ktbl],#$SZ			// *K++
	eor	$t3,$B,$C				// magic seed
	str	$inp,[x29,#112]
___
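# The first 16 rounds double as input load and byte swap; .Loop_16_xx
# then covers 16 rounds per iteration. The zero terminator at the end
# of .LK$BITS is what eventually clears $t2 and falls through the
# cbnz-controlled loop after $rounds rounds.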
for ($i=0;$i<16;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=".Loop_16_xx:\n";
for (;$i<32;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	cbnz	$t2,.Loop_16_xx

	ldp	$ctx,$num,[x29,#96]
	ldr	$inp,[x29,#112]
	sub	$Ktbl,$Ktbl,#`$SZ*($rounds+1)`		// rewind

	ldp	@X[0],@X[1],[$ctx]
	ldp	@X[2],@X[3],[$ctx,#2*$SZ]
	add	$inp,$inp,#14*$SZ			// advance input pointer
	ldp	@X[4],@X[5],[$ctx,#4*$SZ]
	add	$A,$A,@X[0]
	ldp	@X[6],@X[7],[$ctx,#6*$SZ]
	add	$B,$B,@X[1]
	add	$C,$C,@X[2]
	add	$D,$D,@X[3]
	stp	$A,$B,[$ctx]
	add	$E,$E,@X[4]
	add	$F,$F,@X[5]
	stp	$C,$D,[$ctx,#2*$SZ]
	add	$G,$G,@X[6]
	add	$H,$H,@X[7]
	cmp	$inp,$num
	stp	$E,$F,[$ctx,#4*$SZ]
	stp	$G,$H,[$ctx,#6*$SZ]
	b.ne	.Loop

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#4*$SZ
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#128
	ret
.size	$func,.-$func

.align	6
.type	.LK$BITS,%object
.LK$BITS:
___
$code.=<<___ if ($SZ==8);
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0	// terminator
___
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
	.long	0	//terminator
___
$code.=<<___;
.size	.LK$BITS,.-.LK$BITS
.align	3
.LOPENSSL_armcap_P:
#ifdef	__ILP32__
	.long	OPENSSL_armcap_P-.
#else
	.quad	OPENSSL_armcap_P-.
#endif
.asciz	"SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

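# Hardware-assisted SHA-256, using the sha256h/sha256h2/sha256su0/
# sha256su1 instructions of the ARMv8 Cryptography Extensions. The
# eight state words live in two vector registers (ABCD/EFGH) and the
# message schedule is expanded in-flight, four words at a time.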
if ($SZ==4) {
my $Ktbl="x3";

my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
my @MSG=map("v$_.16b",(4..7));
my ($W0,$W1)=("v16.4s","v17.4s");
my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");

$code.=<<___;
.type	sha256_block_armv8,%function
.align	6
sha256_block_armv8:
.Lv8_entry:
	stp		x29,x30,[sp,#-16]!
	add		x29,sp,#0

	ld1.32		{$ABCD,$EFGH},[$ctx]
	adr		$Ktbl,.LK256

.Loop_hw:
	ld1		{@MSG[0]-@MSG[3]},[$inp],#64
	sub		$num,$num,#1
	ld1.32		{$W0},[$Ktbl],#16
	rev32		@MSG[0],@MSG[0]
	rev32		@MSG[1],@MSG[1]
	rev32		@MSG[2],@MSG[2]
	rev32		@MSG[3],@MSG[3]
	orr		$ABCD_SAVE,$ABCD,$ABCD		// offload
	orr		$EFGH_SAVE,$EFGH,$EFGH
___
for($i=0;$i<12;$i++) {
$code.=<<___;
	ld1.32		{$W1},[$Ktbl],#16
	add.i32		$W0,$W0,@MSG[0]
	sha256su0	@MSG[0],@MSG[1]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	sha256su1	@MSG[0],@MSG[2],@MSG[3]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
}
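# The loop above emits the 12 quad-round groups that still require
# message expansion; the remaining four groups below consume the
# already-scheduled words without further sha256su0/sha256su1.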
$code.=<<___;
	ld1.32		{$W1},[$Ktbl],#16
	add.i32		$W0,$W0,@MSG[0]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	ld1.32		{$W0},[$Ktbl],#16
	add.i32		$W1,$W1,@MSG[1]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	ld1.32		{$W1},[$Ktbl]
	add.i32		$W0,$W0,@MSG[2]
	sub		$Ktbl,$Ktbl,#$rounds*$SZ-16	// rewind
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	add.i32		$W1,$W1,@MSG[3]
	orr		$abcd,$ABCD,$ABCD
	sha256h		$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	add.i32		$ABCD,$ABCD,$ABCD_SAVE
	add.i32		$EFGH,$EFGH,$EFGH_SAVE

	cbnz		$num,.Loop_hw

	st1.32		{$ABCD,$EFGH},[$ctx]

	ldr		x29,[sp],#16
	ret
.size	sha256_block_armv8,.-sha256_block_armv8
___
}

$code.=<<___;
.comm	OPENSSL_armcap_P,4,4
___

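# Assemblers old enough not to know the SHA-2 instructions are catered
# for by encoding them as raw .inst words: the helper below ORs the
# destination register into bits [4:0], the first source into bits
# [9:5] and the optional second source into bits [20:16] of the base
# opcode.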
{   my  %opcode = (
	"sha256h"	=> 0x5e004000,	"sha256h2"	=> 0x5e005000,
	"sha256su0"	=> 0x5e282800,	"sha256su1"	=> 0x5e006000	);

    sub unsha256 {
	my ($mnemonic,$arg)=@_;

	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
	&&
	sprintf ".inst\t0x%08x\t//%s %s",
			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
			$mnemonic,$arg;
    }
}

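# Post-process the accumulated $code: evaluate backticked expressions,
# rewrite sha256* mnemonics through unsha256() for the benefit of
# SHA-2-unaware assemblers, and translate the ".32"-annotated NEON
# syntax used above into standard arrangement specifiers.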
foreach(split("\n",$code)) {

	s/\`([^\`]*)\`/eval($1)/geo;

	s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/geo;

	s/\.\w?32\b//o		and s/\.16b/\.4s/go;
	m/(ld|st)1[^\[]+\[0\]/o	and s/\.4s/\.s/go;

	print $_,"\n";
}

close STDOUT;