# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Permission to use under GPLv2 terms is granted.
# ====================================================================
# SHA256/512 for ARMv8.
#
# Performance in cycles per processed byte and improvement coefficient
# over code generated with "default" compiler:
#
#		SHA256-hw	SHA256(*)	SHA512
# Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
# Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
# Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
# Denver	2.01		10.5 (+26%)	6.70 (+8%)
# X-Gene			20.0 (+100%)	12.8 (+300%(***))
# Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
# Kryo		1.92		17.4 (+30%)	11.2 (+8%)
#
# (*)	Software SHA256 results are of lesser relevance, presented
#	mostly for informational purposes.
# (**)	The result is a trade-off: it's possible to improve it by
#	10% (or by 1 cycle per round), but at the cost of a 20% loss
#	on Cortex-A53 (or by 4 cycles per round).
# (***)	Super-impressive coefficients over gcc-generated code are an
#	indication of a compiler "pathology"; most notably, code
#	generated with -mgeneral-regs-only is significantly faster
#	and the gap is only 40-90%.
# Originally it was reckoned that it made no sense to implement a NEON
# version of SHA256 for 64-bit processors. This is because the performance
# improvement on the most widespread Cortex-A5x processors was observed
# to be marginal: parity on Cortex-A53 and ~10% on A57. But then it was
# observed that 32-bit NEON SHA256 performs significantly better than the
# 64-bit scalar version on *some* of the more recent processors. As a
# result, a 64-bit NEON version of SHA256 was added to provide the best
# all-round performance. For example, it executes ~30% faster on X-Gene
# and Mongoose. [For reference, the NEON version of SHA512 is bound to
# deliver much less improvement, likely *negative* on Cortex-A5x,
# which is why NEON support is limited to SHA256.]
if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open OUT,"| \"$^X\" $xlate $flavour $output";

    open STDOUT,">$output";

if ($output =~ /512/) {

$func="sha${BITS}_block_data_order";

($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));

@X=map("$reg_t$_",(3..15,0..2));
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
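# The loop below implements the standard FIPS 180-4 compression round for
# either SHA-256 or SHA-512 depending on $SZ. As a minimal reference sketch
# (illustration only, never called by this module; the ref_* helper names
# are not part of the original), the SHA-256 flavour of one round is:

sub ref_ror32   { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff; }
sub ref_round256 {
    my ($a,$b,$c,$d,$e,$f,$g,$h,$Ki,$Wi)=@_;
    my $Sigma1 = ref_ror32($e,6)^ref_ror32($e,11)^ref_ror32($e,25);
    my $Sigma0 = ref_ror32($a,2)^ref_ror32($a,13)^ref_ror32($a,22);
    my $Ch     = ($e&$f)^(~$e&$g);
    my $Maj    = ($a&$b)^($a&$c)^($b&$c);
    my $T1     = ($h+$Sigma1+$Ch+$Ki+$Wi) & 0xffffffff;
    my $T2     = ($Sigma0+$Maj) & 0xffffffff;
    return (($T1+$T2)&0xffffffff,$a,$b,$c,($d+$T1)&0xffffffff,$e,$f,$g);
}

# BODY_00_xx spreads the same computation over $t0..$t3/$T0..$T2, computes
# Maj(a,b,c) via the ((a^b)&(b^c))^b identity, defers the final
# "h+=Sigma0(a)" into the following round, and (for $i>=15) also extends the
# message schedule: X[j] += sigma0(X[j+1]) + X[j+9] + sigma1(X[j+14]).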
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
   $T0=@X[$i+3] if ($i<11);
$code.=<<___	if ($i<16);
#ifndef	__AARCH64EB__
	rev	@X[$i],@X[$i]			// $i
$code.=<<___	if ($i<13 && ($i&1));
	ldp	@X[$i+1],@X[$i+2],[$inp],#2*$SZ
$code.=<<___	if ($i==13);
	ldp	@X[14],@X[15],[$inp]
$code.=<<___	if ($i>=14);
	ldr	@X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
$code.=<<___	if ($i>0 && $i<16);
	add	$a,$a,$t1			// h+=Sigma0(a)
$code.=<<___	if ($i>=11);
	str	@X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
# While ARMv8 specifies a merged rotate-and-logical operation such as
# 'eor x,y,z,ror#n', it was found to negatively affect performance
# on Apple A7. The reason seems to be that it requires even 'y' to
# be available earlier. This means that such a merged instruction is
# not necessarily the best choice on the critical path... On the other
# hand, Cortex-A5x handles merged instructions much better than disjoint
# rotate and logical... See the (**) footnote above.
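# For illustration only (neither form below is literally emitted here), a
# merged rotate-and-logical
#	eor	x0,x1,x2,ror#6		// needs x1 ready at issue time
# can be split into
#	ror	x3,x2,#6
#	eor	x0,x1,x3		// x1 not needed until a step later
# trading an extra instruction and scratch register for a shorter
# dependency on 'x1' along the critical path.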
$code.=<<___	if ($i<15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	eor	$T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
	add	$h,$h,@X[$i&15]			// h+=X[i]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$T0,ror#$Sigma1[1]	// Sigma1(e)
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	eor	$t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
	add	$h,$h,$t0			// h+=Sigma1(e)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$t1,ror#$Sigma0[1]	// Sigma0(a)
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	//add	$h,$h,$t1			// h+=Sigma0(a)
$code.=<<___	if ($i>=15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	ror	$T1,@X[($j+1)&15],#$sigma0[0]
	ror	$T2,@X[($j+14)&15],#$sigma1[0]
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,@X[$i&15]			// h+=X[i]
	eor	$t0,$t0,$e,ror#$Sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$e,ror#$Sigma1[2]	// Sigma1(e)
	eor	$T0,$T0,$a,ror#$Sigma0[1]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	eor	$T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]	// sigma0(X[i+1])
	add	$h,$h,$t0			// h+=Sigma1(e)
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$a,ror#$Sigma0[2]	// Sigma0(a)
	eor	$T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]	// sigma1(X[i+14])
	add	@X[$j],@X[$j],@X[($j+9)&15]
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	add	@X[$j],@X[$j],$T1
	add	$h,$h,$t1			// h+=Sigma0(a)
	add	@X[$j],@X[$j],$T2
# include "arm_arch.h"
.extern	OPENSSL_armcap_P
.type	$func,%function
$code.=<<___	if ($SZ==4);
	ldrsw	x16,.LOPENSSL_armcap_P
	ldr	x16,.LOPENSSL_armcap_P
	adr	x17,.LOPENSSL_armcap_P
	tst	w16,#ARMV8_SHA256
	stp	x29,x30,[sp,#-128]!
	ldp	$A,$B,[$ctx]			// load context
	ldp	$C,$D,[$ctx,#2*$SZ]
	ldp	$E,$F,[$ctx,#4*$SZ]
	add	$num,$inp,$num,lsl#`log(16*$SZ)/log(2)`	// end of input
	ldp	$G,$H,[$ctx,#6*$SZ]
	stp	$ctx,$num,[x29,#96]
	ldp	@X[0],@X[1],[$inp],#2*$SZ
	ldr	$t2,[$Ktbl],#$SZ		// *K++
	eor	$t3,$B,$C			// magic seed
for ($i=0;$i<16;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=".Loop_16_xx:\n";
for (;$i<32;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
	ldp	$ctx,$num,[x29,#96]
	sub	$Ktbl,$Ktbl,#`$SZ*($rounds+1)`	// rewind
	ldp	@X[0],@X[1],[$ctx]
	ldp	@X[2],@X[3],[$ctx,#2*$SZ]
	add	$inp,$inp,#14*$SZ		// advance input pointer
	ldp	@X[4],@X[5],[$ctx,#4*$SZ]
	ldp	@X[6],@X[7],[$ctx,#6*$SZ]
	stp	$C,$D,[$ctx,#2*$SZ]
	stp	$E,$F,[$ctx,#4*$SZ]
	stp	$G,$H,[$ctx,#6*$SZ]
	ldp	x19,x20,[x29,#16]
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#128
.type	.LK$BITS,%object
$code.=<<___ if ($SZ==8);
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0	// terminator
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size	.LK$BITS,.-.LK$BITS
.long	OPENSSL_armcap_P-.
.quad	OPENSSL_armcap_P-.
.asciz	"SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
my @MSG=map("v$_.16b",(4..7));
my ($W0,$W1)=("v16.4s","v17.4s");
my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");
.type	sha256_block_armv8,%function
	stp	x29,x30,[sp,#-16]!
	ld1.32	{$ABCD,$EFGH},[$ctx]
	ld1	{@MSG[0]-@MSG[3]},[$inp],#64
	ld1.32	{$W0},[$Ktbl],#16
	rev32	@MSG[0],@MSG[0]
	rev32	@MSG[1],@MSG[1]
	rev32	@MSG[2],@MSG[2]
	rev32	@MSG[3],@MSG[3]
	orr	$ABCD_SAVE,$ABCD,$ABCD		// offload
	orr	$EFGH_SAVE,$EFGH,$EFGH
for($i=0;$i<12;$i++) {
	ld1.32	{$W1},[$Ktbl],#16
	add.i32	$W0,$W0,@MSG[0]
	sha256su0	@MSG[0],@MSG[1]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	sha256su1	@MSG[0],@MSG[2],@MSG[3]
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
	ld1.32	{$W1},[$Ktbl],#16
	add.i32	$W0,$W0,@MSG[0]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	ld1.32	{$W0},[$Ktbl],#16
	add.i32	$W1,$W1,@MSG[1]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1
	add.i32	$W0,$W0,@MSG[2]
	sub	$Ktbl,$Ktbl,#$rounds*$SZ-16	// rewind
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	add.i32	$W1,$W1,@MSG[3]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1
	add.i32	$ABCD,$ABCD,$ABCD_SAVE
	add.i32	$EFGH,$EFGH,$EFGH_SAVE
	st1.32	{$ABCD,$EFGH},[$ctx]
.size	sha256_block_armv8,.-sha256_block_armv8
if ($SZ==4) {	######################################### NEON stuff #
# You will surely note a lot of similarities with the sha256-armv4 module,
# and of course that's not a coincidence: sha256-armv4 was used as the
# initial template, but it was adapted to the ARMv8 instruction set and
# extensively re-tuned for all-round performance.
my @V = ($A,$B,$C,$D,$E,$F,$G,$H) = map("w$_",(3..10));
my ($t0,$t1,$t2,$t3,$t4) = map("w$_",(11..15));
my @X = map("q$_",(0..3));
my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
sub Dscalar { shift =~ m|[qv]([0-9]+)|?"d$1":""; }	# low 64 bits as d-register
sub Dlo     { shift =~ m|[qv]([0-9]+)|?"v$1.d[0]":""; }	# low 64-bit lane
sub Dhi     { shift =~ m|[qv]([0-9]+)|?"v$1.d[1]":""; }	# high 64-bit lane
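# A hedged illustration of the AUTOLOAD thunk above (not executed by the
# script): a call such as &ushr_32($T2,$T0,$sigma0[0]) turns the missing
# sub name "ushr_32" into the mnemonic "ushr.32", prefixes the numeric
# immediate with '#', and appends roughly
#	ushr.32	q6,q4,#7
# to $code; the output pass at the end of the file then rewrites "q6"/"q4"
# as "v6.4s"/"v4.4s" and drops the ".32" suffix. Likewise the ushr_32/sli_32
# pairs used in Xupdate below emulate a 32-bit vector rotate (shift right
# by n, then shift-left-and-insert by 32-n), since ASIMD has no vector 'ror'.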
my @insns = (&$body,&$body,&$body,&$body);
my ($a,$b,$c,$d,$e,$f,$g,$h);
	&ext_8		($T0,@X[0],@X[1],4);	# X[1..4]
	&ext_8		($T3,@X[2],@X[3],4);	# X[9..12]
	&mov		(&Dscalar($T7),&Dhi(@X[3]));	# X[14..15]
	&ushr_32	($T2,$T0,$sigma0[0]);
	&ushr_32	($T1,$T0,$sigma0[2]);
	&add_32		(@X[0],@X[0],$T3);	# X[0..3] += X[9..12]
	&sli_32		($T2,$T0,32-$sigma0[0]);
	&ushr_32	($T3,$T0,$sigma0[1]);
	&eor_8		($T1,$T1,$T2);
	&sli_32		($T3,$T0,32-$sigma0[1]);
	&ushr_32	($T4,$T7,$sigma1[0]);
	&eor_8		($T1,$T1,$T3);		# sigma0(X[1..4])
	&sli_32		($T4,$T7,32-$sigma1[0]);
	&ushr_32	($T5,$T7,$sigma1[2]);
	&ushr_32	($T3,$T7,$sigma1[1]);
	&add_32		(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
	&sli_u32	($T3,$T7,32-$sigma1[1]);
	&eor_8		($T5,$T5,$T4);
	&eor_8		($T5,$T5,$T3);		# sigma1(X[14..15])
	&add_32		(@X[0],@X[0],$T5);	# X[0..1] += sigma1(X[14..15])
	&ushr_32	($T6,@X[0],$sigma1[0]);
	&ushr_32	($T7,@X[0],$sigma1[2]);
	&sli_32		($T6,@X[0],32-$sigma1[0]);
	&ushr_32	($T5,@X[0],$sigma1[1]);
	&eor_8		($T7,$T7,$T6);
	&sli_32		($T5,@X[0],32-$sigma1[1]);
	&ld1_32		("{$T0}","[$Ktbl], #16");
	&eor_8		($T7,$T7,$T5);		# sigma1(X[16..17])
	&eor_8		($T5,$T5,$T5);
	&mov		(&Dhi($T5), &Dlo($T7));
	&add_32		(@X[0],@X[0],$T5);	# X[2..3] += sigma1(X[16..17])
	&add_32		($T0,$T0,@X[0]);
	while($#insns>=1) { eval(shift(@insns)); }
	&st1_32		("{$T0}","[$Xfer], #16");
	push(@X,shift(@X));		# "rotate" X[]
my @insns = (&$body,&$body,&$body,&$body);
my ($a,$b,$c,$d,$e,$f,$g,$h);
	&ld1_8		("{@X[0]}","[$inp],#16");
	&ld1_32		("{$T0}","[$Ktbl],#16");
	&rev32		(@X[0],@X[0]);
	&add_32		($T0,$T0,@X[0]);
	foreach (@insns) { eval; }	# remaining instructions
	&st1_32		("{$T0}","[$Xfer], #16");
	push(@X,shift(@X));		# "rotate" X[]
	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
	'&add	($a,$a,$t4);'.			# h+=Sigma0(a) from the past
	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
	'&orr	($t1,$t1,$t4)',			# Ch(e,f,g)
	'&eor	($t0,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
	'&eor	($t4,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
	'&ror	($t0,$t0,"#$Sigma1[0]")',
	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
	'&eor	($t4,$t4,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
	'&add	($h,$h,$t0)',			# h+=Sigma1(e)
	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
	'&ldr	($t1,"[$Ktbl]")			if ($j==15);'.
	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
	'&ror	($t4,$t4,"#$Sigma0[0]")',
	'&add	($d,$d,$h)',			# d+=h
	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
.globl	sha256_block_neon
.type	sha256_block_neon,%function
	stp	x29, x30, [sp, #-16]!
	add	$num,$inp,$num,lsl#6	// len to point at the end of inp
	ld1.8	{@X[0]},[$inp], #16
	ld1.8	{@X[1]},[$inp], #16
	ld1.8	{@X[2]},[$inp], #16
	ld1.8	{@X[3]},[$inp], #16
	ld1.32	{$T0},[$Ktbl], #16
	ld1.32	{$T1},[$Ktbl], #16
	ld1.32	{$T2},[$Ktbl], #16
	ld1.32	{$T3},[$Ktbl], #16
	rev32	@X[0],@X[0]		// yes, even on
	rev32	@X[1],@X[1]		// big-endian
	st1.32	{$T0-$T1},[$Xfer], #32
	st1.32	{$T2-$T3},[$Xfer]
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
	cmp	$t1,#0			// check for K256 terminator
	sub	$Ktbl,$Ktbl,#256	// rewind $Ktbl
	csel	$Xfer, $Xfer, xzr, eq
	sub	$inp,$inp,$Xfer		// avoid SEGV
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
	add	$A,$A,$t4		// h+=Sigma0(a) from the past
	ldp	$t0,$t1,[$ctx,#0]
	add	$A,$A,$t2		// h+=Maj(a,b,c) from the past
	ldp	$t2,$t3,[$ctx,#8]
	add	$A,$A,$t0		// accumulate
	ldp	$t0,$t1,[$ctx,#16]
	ldp	$t2,$t3,[$ctx,#24]
.size	sha256_block_neon,.-sha256_block_neon
.comm	OPENSSL_armcap_P,4,4
	"sha256h"	=> 0x5e004000,	"sha256h2"	=> 0x5e005000,
	"sha256su0"	=> 0x5e282800,	"sha256su1"	=> 0x5e006000 );
my ($mnemonic,$arg)=@_;
$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
sprintf ".inst\t0x%08x\t//%s %s",
	$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
	last if (!s/^#/\/\// and !/^$/);
foreach(split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge;
	s/\bq([0-9]+)\b/v$1.16b/g;		# old->new registers
	s/\.\w?32\b//		and s/\.16b/\.4s/g;	# drop size suffix, widen to .4s
	m/(ld|st)1[^\[]+\[0\]/	and s/\.4s/\.s/g;	# single-lane ld1/st1