2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the Apache License 2.0 (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
21 # Performance in cycles per byte out of large buffer.
23 # IALU/gcc-4.4 1xNEON 3xNEON+1xIALU
25 # Cortex-A5 19.3(*)/+95% 21.8 14.1
26 # Cortex-A8 10.5(*)/+160% 13.9 6.35
27 # Cortex-A9 12.9(**)/+110% 14.3 6.50
28 # Cortex-A15 11.0/+40% 16.0 5.00
29 # Snapdragon S4 11.5/+125% 13.6 4.90
31 # (*) most "favourable" result for aligned data on little-endian
32 # processor, result for misaligned data is 10-15% lower;
33 # (**) this result is a trade-off: it can be improved by 20%,
34 # but then Snapdragon S4 and Cortex-A8 results get
# 20-25% worse;
38 if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
39 else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
41 if ($flavour && $flavour ne "void") {
42 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
43 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
44 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
45 die "can't locate arm-xlate.pl";
47 open STDOUT,"| \"$^X\" $xlate $flavour $output";
49 open STDOUT,">$output";
52 sub AUTOLOAD() # thunk [simplified] x86-style perlasm
53 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
55 $arg = "#$arg" if ($arg*1 eq $arg);
56 $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
# Register map for the integer-only path: ChaCha state words 0..15 -> ARM
# registers. Entries that expand to the non-existent register "rx" are
# state words kept on the stack rather than in registers (per the ROUND
# commentary below: the four 'c' words 8..11 and d-words 13/15 are
# offloaded to memory).
59 my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x"));
# Four scratch registers (r8-r11) used as temporaries by ROUND.
60 my @t=map("r$_",(8..11));
63 my ($a0,$b0,$c0,$d0)=@_;
64 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
65 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
66 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
68 my ($xc,$xc_) = (@t[0..1]);
69 my ($xd,$xd_) = $odd ? (@t[2],@x[$d1]) : (@x[$d0],@t[2]);
72 # Consider order in which variables are addressed by their index:
77 # 0 4 8 12 < even round
81 # 0 5 10 15 < odd round
86 # 'a', 'b' are permanently allocated in registers, @x[0..7],
87 # while 'c's and pair of 'd's are maintained in memory. If
88 # you observe 'c' column, you'll notice that pair of 'c's is
89 # invariant between rounds. This means that we have to reload
90 # them once per round, in the middle. This is why you'll see
91 # bunch of 'c' stores and loads in the middle, but none in
92 # the beginning or end. If you observe 'd' column, you'll
93 # notice that 15 and 13 are reused in next pair of rounds.
94 # This is why these two are chosen for offloading to memory,
95 # to make loads count more.
97 "&add (@x[$a0],@x[$a0],@x[$b0])",
98 "&mov ($xd,$xd,'ror#16')",
99 "&add (@x[$a1],@x[$a1],@x[$b1])",
100 "&mov ($xd_,$xd_,'ror#16')",
101 "&eor ($xd,$xd,@x[$a0],'ror#16')",
102 "&eor ($xd_,$xd_,@x[$a1],'ror#16')",
104 "&add ($xc,$xc,$xd)",
105 "&mov (@x[$b0],@x[$b0],'ror#20')",
106 "&add ($xc_,$xc_,$xd_)",
107 "&mov (@x[$b1],@x[$b1],'ror#20')",
108 "&eor (@x[$b0],@x[$b0],$xc,'ror#20')",
109 "&eor (@x[$b1],@x[$b1],$xc_,'ror#20')",
111 "&add (@x[$a0],@x[$a0],@x[$b0])",
112 "&mov ($xd,$xd,'ror#24')",
113 "&add (@x[$a1],@x[$a1],@x[$b1])",
114 "&mov ($xd_,$xd_,'ror#24')",
115 "&eor ($xd,$xd,@x[$a0],'ror#24')",
116 "&eor ($xd_,$xd_,@x[$a1],'ror#24')",
118 "&add ($xc,$xc,$xd)",
119 "&mov (@x[$b0],@x[$b0],'ror#25')" );
121 "&str ($xd,'[sp,#4*(16+$d0)]')",
122 "&ldr ($xd,'[sp,#4*(16+$d2)]')" ) if ($odd);
124 "&add ($xc_,$xc_,$xd_)",
125 "&mov (@x[$b1],@x[$b1],'ror#25')" );
127 "&str ($xd_,'[sp,#4*(16+$d1)]')",
128 "&ldr ($xd_,'[sp,#4*(16+$d3)]')" ) if (!$odd);
130 "&eor (@x[$b0],@x[$b0],$xc,'ror#25')",
131 "&eor (@x[$b1],@x[$b1],$xc_,'ror#25')" );
133 $xd=@x[$d2] if (!$odd);
134 $xd_=@x[$d3] if ($odd);
136 "&str ($xc,'[sp,#4*(16+$c0)]')",
137 "&ldr ($xc,'[sp,#4*(16+$c2)]')",
138 "&add (@x[$a2],@x[$a2],@x[$b2])",
139 "&mov ($xd,$xd,'ror#16')",
140 "&str ($xc_,'[sp,#4*(16+$c1)]')",
141 "&ldr ($xc_,'[sp,#4*(16+$c3)]')",
142 "&add (@x[$a3],@x[$a3],@x[$b3])",
143 "&mov ($xd_,$xd_,'ror#16')",
144 "&eor ($xd,$xd,@x[$a2],'ror#16')",
145 "&eor ($xd_,$xd_,@x[$a3],'ror#16')",
147 "&add ($xc,$xc,$xd)",
148 "&mov (@x[$b2],@x[$b2],'ror#20')",
149 "&add ($xc_,$xc_,$xd_)",
150 "&mov (@x[$b3],@x[$b3],'ror#20')",
151 "&eor (@x[$b2],@x[$b2],$xc,'ror#20')",
152 "&eor (@x[$b3],@x[$b3],$xc_,'ror#20')",
154 "&add (@x[$a2],@x[$a2],@x[$b2])",
155 "&mov ($xd,$xd,'ror#24')",
156 "&add (@x[$a3],@x[$a3],@x[$b3])",
157 "&mov ($xd_,$xd_,'ror#24')",
158 "&eor ($xd,$xd,@x[$a2],'ror#24')",
159 "&eor ($xd_,$xd_,@x[$a3],'ror#24')",
161 "&add ($xc,$xc,$xd)",
162 "&mov (@x[$b2],@x[$b2],'ror#25')",
163 "&add ($xc_,$xc_,$xd_)",
164 "&mov (@x[$b3],@x[$b3],'ror#25')",
165 "&eor (@x[$b2],@x[$b2],$xc,'ror#25')",
166 "&eor (@x[$b3],@x[$b3],$xc_,'ror#25')" );
172 #include "arm_arch.h"
174 #if defined(__thumb2__) || defined(__clang__)
177 #if defined(__thumb2__)
183 #if defined(__thumb2__) || defined(__clang__)
184 #define ldrhsb ldrbhs
191 .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral
194 #if __ARM_MAX_ARCH__>=7
197 .word OPENSSL_armcap_P
199 .word OPENSSL_armcap_P-.LChaCha20_ctr32
205 .globl ChaCha20_ctr32
206 .type ChaCha20_ctr32,%function
210 ldr r12,[sp,#0] @ pull pointer to counter and nonce
211 stmdb sp!,{r0-r2,r4-r11,lr}
212 #if __ARM_ARCH__<7 && !defined(__thumb2__)
213 sub r14,pc,#16 @ ChaCha20_ctr32
215 adr r14,.LChaCha20_ctr32
223 #if __ARM_MAX_ARCH__>=7
224 cmp r2,#192 @ test len
227 # if !defined(_WIN32)
230 # if defined(__APPLE__) || defined(_WIN32)
237 ldmia r12,{r4-r7} @ load counter and nonce
238 sub sp,sp,#4*(16) @ off-load area
239 sub r14,r14,#64 @ .Lsigma
240 stmdb sp!,{r4-r7} @ copy counter and nonce
241 ldmia r3,{r4-r11} @ load key
242 ldmia r14,{r0-r3} @ load sigma
243 stmdb sp!,{r4-r11} @ copy key
244 stmdb sp!,{r0-r3} @ copy sigma
245 str r10,[sp,#4*(16+10)] @ off-load "@x[10]"
246 str r11,[sp,#4*(16+11)] @ off-load "@x[11]"
251 ldmia sp,{r0-r9} @ load key material
252 str @t[3],[sp,#4*(32+2)] @ save len
253 str r12, [sp,#4*(32+1)] @ save inp
254 str r14, [sp,#4*(32+0)] @ save out
256 ldr @t[3], [sp,#4*(15)]
257 ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
258 ldr @t[2], [sp,#4*(13)]
259 ldr @x[14],[sp,#4*(14)]
260 str @t[3], [sp,#4*(16+15)]
# One ChaCha double-round: an "even" round over columns (0,4,8,12)
# followed by an "odd" round over diagonals (0,5,10,15) -- cf. the
# addressing-order table in the ROUND commentary above. Each &ROUND
# call returns a list of perlasm code strings which eval appends to $code.
268 foreach (&ROUND(0, 4, 8,12)) { eval; }
269 foreach (&ROUND(0, 5,10,15)) { eval; }
273 ldr @t[3],[sp,#4*(32+2)] @ load len
275 str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store
276 str @t[1], [sp,#4*(16+9)]
277 str @x[12],[sp,#4*(16+12)]
278 str @t[2], [sp,#4*(16+13)]
279 str @x[14],[sp,#4*(16+14)]
281 @ at this point we have first half of 512-bit result in
282 @ @x[0-7] and second half at sp+4*(16+8)
284 cmp @t[3],#64 @ done yet?
288 addlo r12,sp,#4*(0) @ shortcut or ...
289 ldrhs r12,[sp,#4*(32+1)] @ ... load inp
290 addlo r14,sp,#4*(0) @ shortcut or ...
291 ldrhs r14,[sp,#4*(32+0)] @ ... load out
293 ldr @t[0],[sp,#4*(0)] @ load key material
294 ldr @t[1],[sp,#4*(1)]
296 #if __ARM_ARCH__>=6 || !defined(__ARMEB__)
299 tst @t[2],#3 @ are input and output aligned?
300 ldr @t[2],[sp,#4*(2)]
302 cmp @t[3],#64 @ restore flags
304 ldr @t[2],[sp,#4*(2)]
306 ldr @t[3],[sp,#4*(3)]
308 add @x[0],@x[0],@t[0] @ accumulate key material
309 add @x[1],@x[1],@t[1]
313 ldrhs @t[0],[r12],#16 @ load input
314 ldrhs @t[1],[r12,#-12]
316 add @x[2],@x[2],@t[2]
317 add @x[3],@x[3],@t[3]
321 ldrhs @t[2],[r12,#-8]
322 ldrhs @t[3],[r12,#-4]
323 # if __ARM_ARCH__>=6 && defined(__ARMEB__)
332 eorhs @x[0],@x[0],@t[0] @ xor with input
333 eorhs @x[1],@x[1],@t[1]
335 str @x[0],[r14],#16 @ store output
339 eorhs @x[2],@x[2],@t[2]
340 eorhs @x[3],@x[3],@t[3]
341 ldmia @t[0],{@t[0]-@t[3]} @ load key material
346 add @x[4],@x[4],@t[0] @ accumulate key material
347 add @x[5],@x[5],@t[1]
351 ldrhs @t[0],[r12],#16 @ load input
352 ldrhs @t[1],[r12,#-12]
353 add @x[6],@x[6],@t[2]
354 add @x[7],@x[7],@t[3]
358 ldrhs @t[2],[r12,#-8]
359 ldrhs @t[3],[r12,#-4]
360 # if __ARM_ARCH__>=6 && defined(__ARMEB__)
369 eorhs @x[4],@x[4],@t[0]
370 eorhs @x[5],@x[5],@t[1]
372 str @x[4],[r14],#16 @ store output
376 eorhs @x[6],@x[6],@t[2]
377 eorhs @x[7],@x[7],@t[3]
379 ldmia @t[0],{@t[0]-@t[3]} @ load key material
381 add @x[0],sp,#4*(16+8)
384 ldmia @x[0],{@x[0]-@x[7]} @ load second half
386 add @x[0],@x[0],@t[0] @ accumulate key material
387 add @x[1],@x[1],@t[1]
391 ldrhs @t[0],[r12],#16 @ load input
392 ldrhs @t[1],[r12,#-12]
396 strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it
397 strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it
398 add @x[2],@x[2],@t[2]
399 add @x[3],@x[3],@t[3]
403 ldrhs @t[2],[r12,#-8]
404 ldrhs @t[3],[r12,#-4]
405 # if __ARM_ARCH__>=6 && defined(__ARMEB__)
414 eorhs @x[0],@x[0],@t[0]
415 eorhs @x[1],@x[1],@t[1]
417 str @x[0],[r14],#16 @ store output
421 eorhs @x[2],@x[2],@t[2]
422 eorhs @x[3],@x[3],@t[3]
424 ldmia @t[0],{@t[0]-@t[3]} @ load key material
428 add @x[4],@x[4],@t[0] @ accumulate key material
429 add @x[5],@x[5],@t[1]
433 addhi @t[0],@t[0],#1 @ next counter value
434 strhi @t[0],[sp,#4*(12)] @ save next counter value
438 ldrhs @t[0],[r12],#16 @ load input
439 ldrhs @t[1],[r12,#-12]
440 add @x[6],@x[6],@t[2]
441 add @x[7],@x[7],@t[3]
445 ldrhs @t[2],[r12,#-8]
446 ldrhs @t[3],[r12,#-4]
447 # if __ARM_ARCH__>=6 && defined(__ARMEB__)
456 eorhs @x[4],@x[4],@t[0]
457 eorhs @x[5],@x[5],@t[1]
461 ldrne @t[0],[sp,#4*(32+2)] @ re-load len
465 eorhs @x[6],@x[6],@t[2]
466 eorhs @x[7],@x[7],@t[3]
467 str @x[4],[r14],#16 @ store output
472 subhs @t[3],@t[0],#64 @ len-=64
482 .Lunaligned: @ unaligned endian-neutral path
483 cmp @t[3],#64 @ restore flags
487 ldr @t[3],[sp,#4*(3)]
489 for ($i=0;$i<16;$i+=4) {
492 $code.=<<___ if ($i==4);
493 add @x[0],sp,#4*(16+8)
495 $code.=<<___ if ($i==8);
496 ldmia @x[0],{@x[0]-@x[7]} @ load second half
500 strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]"
501 strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]"
504 add @x[$j+0],@x[$j+0],@t[0] @ accumulate key material
506 $code.=<<___ if ($i==12);
510 addhi @t[0],@t[0],#1 @ next counter value
511 strhi @t[0],[sp,#4*(12)] @ save next counter value
514 add @x[$j+1],@x[$j+1],@t[1]
515 add @x[$j+2],@x[$j+2],@t[2]
519 eorlo @t[0],@t[0],@t[0] @ zero or ...
520 ldrhsb @t[0],[r12],#16 @ ... load input
521 eorlo @t[1],@t[1],@t[1]
522 ldrhsb @t[1],[r12,#-12]
524 add @x[$j+3],@x[$j+3],@t[3]
528 eorlo @t[2],@t[2],@t[2]
529 ldrhsb @t[2],[r12,#-8]
530 eorlo @t[3],@t[3],@t[3]
531 ldrhsb @t[3],[r12,#-4]
533 eor @x[$j+0],@t[0],@x[$j+0] @ xor with input (or zero)
534 eor @x[$j+1],@t[1],@x[$j+1]
538 ldrhsb @t[0],[r12,#-15] @ load more input
539 ldrhsb @t[1],[r12,#-11]
540 eor @x[$j+2],@t[2],@x[$j+2]
541 strb @x[$j+0],[r14],#16 @ store output
542 eor @x[$j+3],@t[3],@x[$j+3]
546 ldrhsb @t[2],[r12,#-7]
547 ldrhsb @t[3],[r12,#-3]
548 strb @x[$j+1],[r14,#-12]
549 eor @x[$j+0],@t[0],@x[$j+0],lsr#8
550 strb @x[$j+2],[r14,#-8]
551 eor @x[$j+1],@t[1],@x[$j+1],lsr#8
555 ldrhsb @t[0],[r12,#-14] @ load more input
556 ldrhsb @t[1],[r12,#-10]
557 strb @x[$j+3],[r14,#-4]
558 eor @x[$j+2],@t[2],@x[$j+2],lsr#8
559 strb @x[$j+0],[r14,#-15]
560 eor @x[$j+3],@t[3],@x[$j+3],lsr#8
564 ldrhsb @t[2],[r12,#-6]
565 ldrhsb @t[3],[r12,#-2]
566 strb @x[$j+1],[r14,#-11]
567 eor @x[$j+0],@t[0],@x[$j+0],lsr#8
568 strb @x[$j+2],[r14,#-7]
569 eor @x[$j+1],@t[1],@x[$j+1],lsr#8
573 ldrhsb @t[0],[r12,#-13] @ load more input
574 ldrhsb @t[1],[r12,#-9]
575 strb @x[$j+3],[r14,#-3]
576 eor @x[$j+2],@t[2],@x[$j+2],lsr#8
577 strb @x[$j+0],[r14,#-14]
578 eor @x[$j+3],@t[3],@x[$j+3],lsr#8
582 ldrhsb @t[2],[r12,#-5]
583 ldrhsb @t[3],[r12,#-1]
584 strb @x[$j+1],[r14,#-10]
585 strb @x[$j+2],[r14,#-6]
586 eor @x[$j+0],@t[0],@x[$j+0],lsr#8
587 strb @x[$j+3],[r14,#-2]
588 eor @x[$j+1],@t[1],@x[$j+1],lsr#8
589 strb @x[$j+0],[r14,#-13]
590 eor @x[$j+2],@t[2],@x[$j+2],lsr#8
591 strb @x[$j+1],[r14,#-9]
592 eor @x[$j+3],@t[3],@x[$j+3],lsr#8
593 strb @x[$j+2],[r14,#-5]
594 strb @x[$j+3],[r14,#-1]
596 $code.=<<___ if ($i<12);
597 add @t[0],sp,#4*(4+$i)
598 ldmia @t[0],{@t[0]-@t[3]} @ load key material
605 ldrne @t[0],[sp,#4*(32+2)] @ re-load len
609 subhs @t[3],@t[0],#64 @ len-=64
616 ldr r12,[sp,#4*(32+1)] @ load inp
618 ldr r14,[sp,#4*(32+0)] @ load out
621 ldrb @t[2],[@t[1]],#1 @ read buffer on stack
622 ldrb @t[3],[r12],#1 @ read input
624 eor @t[3],@t[3],@t[2]
625 strb @t[3],[r14],#1 @ store output
631 ldmia sp!,{r4-r11,pc}
632 .size ChaCha20_ctr32,.-ChaCha20_ctr32
636 my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) =
641 my ($a,$b,$c,$d,$t)=@_;
644 "&vadd_i32 ($a,$a,$b)",
646 "&vrev32_16 ($d,$d)", # vrot ($d,16)
648 "&vadd_i32 ($c,$c,$d)",
650 "&vshr_u32 ($b,$t,20)",
651 "&vsli_32 ($b,$t,12)",
653 "&vadd_i32 ($a,$a,$b)",
655 "&vshr_u32 ($d,$t,24)",
656 "&vsli_32 ($d,$t,8)",
658 "&vadd_i32 ($c,$c,$d)",
660 "&vshr_u32 ($b,$t,25)",
661 "&vsli_32 ($b,$t,7)",
663 "&vext_8 ($c,$c,$c,8)",
664 "&vext_8 ($b,$b,$b,$odd?12:4)",
665 "&vext_8 ($d,$d,$d,$odd?4:12)"
670 #if __ARM_MAX_ARCH__>=7
674 .type ChaCha20_neon,%function
677 ldr r12,[sp,#0] @ pull pointer to counter and nonce
678 stmdb sp!,{r0-r2,r4-r11,lr}
681 vstmdb sp!,{d8-d15} @ ABI spec says so
684 vld1.32 {$b0-$c0},[r3] @ load key
685 ldmia r3,{r4-r11} @ load key
688 vld1.32 {$d0},[r12] @ load counter and nonce
690 ldmia r14,{r0-r3} @ load sigma
691 vld1.32 {$a0},[r14]! @ load sigma
692 vld1.32 {$t0},[r14] @ one
693 vst1.32 {$c0-$d0},[r12] @ copy 1/2key|counter|nonce
694 vst1.32 {$a0-$b0},[sp] @ copy sigma|1/2key
696 str r10,[sp,#4*(16+10)] @ off-load "@x[10]"
697 str r11,[sp,#4*(16+11)] @ off-load "@x[11]"
698 vshl.i32 $t1#lo,$t0#lo,#1 @ two
699 vstr $t0#lo,[sp,#4*(16+0)]
700 vshl.i32 $t2#lo,$t0#lo,#2 @ four
701 vstr $t1#lo,[sp,#4*(16+2)]
703 vstr $t2#lo,[sp,#4*(16+4)]
711 ldmia sp,{r0-r9} @ load key material
712 cmp @t[3],#64*2 @ if len<=64*2
713 bls .Lbreak_neon @ switch to integer-only
715 str @t[3],[sp,#4*(32+2)] @ save len
717 str r12, [sp,#4*(32+1)] @ save inp
719 str r14, [sp,#4*(32+0)] @ save out
722 ldr @t[3], [sp,#4*(15)]
723 vadd.i32 $d1,$d0,$t0 @ counter+1
724 ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
726 ldr @t[2], [sp,#4*(13)]
728 ldr @x[14],[sp,#4*(14)]
729 vadd.i32 $d2,$d1,$t0 @ counter+2
730 str @t[3], [sp,#4*(16+15)]
732 add @x[12],@x[12],#3 @ counter+3
# Generate four instruction streams for the even round: three NEON
# "threads" (one 64-byte block each) plus one integer-only thread
# (the 3xNEON+1xIALU scheme from the performance table in the header).
# They are interleaved instruction-by-instruction further below to hide
# latency between the NEON and integer pipelines.
739 my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0);
740 my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0);
741 my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0);
742 my @thread3=&ROUND(0,4,8,12);
745 eval; eval(shift(@thread3));
746 eval(shift(@thread1)); eval(shift(@thread3));
747 eval(shift(@thread2)); eval(shift(@thread3));
750 @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1);
751 @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1);
752 @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1);
753 @thread3=&ROUND(0,5,10,15);
756 eval; eval(shift(@thread3));
757 eval(shift(@thread1)); eval(shift(@thread3));
758 eval(shift(@thread2)); eval(shift(@thread3));
764 vld1.32 {$t0-$t1},[sp] @ load key material
765 vld1.32 {$t2-$t3},[@t[3]]
767 ldr @t[3],[sp,#4*(32+2)] @ load len
769 str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store
770 str @t[1], [sp,#4*(16+9)]
771 str @x[12],[sp,#4*(16+12)]
772 str @t[2], [sp,#4*(16+13)]
773 str @x[14],[sp,#4*(16+14)]
775 @ at this point we have first half of 512-bit result in
776 @ @x[0-7] and second half at sp+4*(16+8)
778 ldr r12,[sp,#4*(32+1)] @ load inp
779 ldr r14,[sp,#4*(32+0)] @ load out
781 vadd.i32 $a0,$a0,$t0 @ accumulate key material
784 vldr $t0#lo,[sp,#4*(16+0)] @ one
789 vldr $t1#lo,[sp,#4*(16+2)] @ two
794 vadd.i32 $d1#lo,$d1#lo,$t0#lo @ counter+1
795 vadd.i32 $d2#lo,$d2#lo,$t1#lo @ counter+2
804 vld1.8 {$t0-$t1},[r12]! @ load input
806 vld1.8 {$t2-$t3},[r12]!
807 veor $a0,$a0,$t0 @ xor with input
809 vld1.8 {$t0-$t1},[r12]!
812 vld1.8 {$t2-$t3},[r12]!
815 vst1.8 {$a0-$b0},[r14]! @ store output
817 vld1.8 {$t0-$t1},[r12]!
819 vst1.8 {$c0-$d0},[r14]!
821 vld1.8 {$t2-$t3},[r12]!
824 vld1.32 {$a0-$b0},[@t[3]]! @ load for next iteration
825 veor $t0#hi,$t0#hi,$t0#hi
826 vldr $t0#lo,[sp,#4*(16+4)] @ four
828 vld1.32 {$c0-$d0},[@t[3]]
830 vst1.8 {$a1-$b1},[r14]!
832 vst1.8 {$c1-$d1},[r14]!
834 vadd.i32 $d0#lo,$d0#lo,$t0#lo @ next counter value
835 vldr $t0#lo,[sp,#4*(16+0)] @ one
837 ldmia sp,{@t[0]-@t[3]} @ load key material
838 add @x[0],@x[0],@t[0] @ accumulate key material
839 ldr @t[0],[r12],#16 @ load input
840 vst1.8 {$a2-$b2},[r14]!
841 add @x[1],@x[1],@t[1]
843 vst1.8 {$c2-$d2},[r14]!
844 add @x[2],@x[2],@t[2]
846 add @x[3],@x[3],@t[3]
854 eor @x[0],@x[0],@t[0] @ xor with input
856 eor @x[1],@x[1],@t[1]
857 str @x[0],[r14],#16 @ store output
858 eor @x[2],@x[2],@t[2]
860 eor @x[3],@x[3],@t[3]
861 ldmia @t[0],{@t[0]-@t[3]} @ load key material
865 add @x[4],@x[4],@t[0] @ accumulate key material
866 ldr @t[0],[r12],#16 @ load input
867 add @x[5],@x[5],@t[1]
869 add @x[6],@x[6],@t[2]
871 add @x[7],@x[7],@t[3]
879 eor @x[4],@x[4],@t[0]
881 eor @x[5],@x[5],@t[1]
882 str @x[4],[r14],#16 @ store output
883 eor @x[6],@x[6],@t[2]
885 eor @x[7],@x[7],@t[3]
886 ldmia @t[0],{@t[0]-@t[3]} @ load key material
888 add @x[0],sp,#4*(16+8)
891 ldmia @x[0],{@x[0]-@x[7]} @ load second half
893 add @x[0],@x[0],@t[0] @ accumulate key material
894 ldr @t[0],[r12],#16 @ load input
895 add @x[1],@x[1],@t[1]
900 strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it
901 add @x[2],@x[2],@t[2]
906 strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it
907 add @x[3],@x[3],@t[3]
915 eor @x[0],@x[0],@t[0]
917 eor @x[1],@x[1],@t[1]
918 str @x[0],[r14],#16 @ store output
919 eor @x[2],@x[2],@t[2]
921 eor @x[3],@x[3],@t[3]
922 ldmia @t[0],{@t[0]-@t[3]} @ load key material
926 add @x[4],@x[4],@t[0] @ accumulate key material
927 add @t[0],@t[0],#4 @ next counter value
928 add @x[5],@x[5],@t[1]
929 str @t[0],[sp,#4*(12)] @ save next counter value
930 ldr @t[0],[r12],#16 @ load input
931 add @x[6],@x[6],@t[2]
932 add @x[4],@x[4],#3 @ counter+3
934 add @x[7],@x[7],@t[3]
943 eor @x[4],@x[4],@t[0]
947 ldrhi @t[0],[sp,#4*(32+2)] @ re-load len
948 eor @x[5],@x[5],@t[1]
949 eor @x[6],@x[6],@t[2]
950 str @x[4],[r14],#16 @ store output
951 eor @x[7],@x[7],@t[3]
953 sub @t[3],@t[0],#64*4 @ len-=64*4
962 @ harmonize NEON and integer-only stack frames: load data
963 @ from NEON frame, but save to integer-only one; distance
964 @ between the two is 4*(32+4+16-32)=4*(20).
966 str @t[3], [sp,#4*(20+32+2)] @ save len
967 add @t[3],sp,#4*(32+4)
968 str r12, [sp,#4*(20+32+1)] @ save inp
969 str r14, [sp,#4*(20+32+0)] @ save out
971 ldr @x[12],[sp,#4*(16+10)]
972 ldr @x[14],[sp,#4*(16+11)]
973 vldmia @t[3],{d8-d15} @ fulfill ABI requirement
974 str @x[12],[sp,#4*(20+16+10)] @ copy "@x[10]"
975 str @x[14],[sp,#4*(20+16+11)] @ copy "@x[11]"
977 ldr @t[3], [sp,#4*(15)]
978 ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
979 ldr @t[2], [sp,#4*(13)]
980 ldr @x[14],[sp,#4*(14)]
981 str @t[3], [sp,#4*(20+16+15)]
983 vst1.32 {$a0-$b0},[@t[3]]! @ copy key
984 add sp,sp,#4*(20) @ switch frame
985 vst1.32 {$c0-$d0},[@t[3]]
987 b .Loop @ go integer-only
992 bhs .L192_or_more_neon
994 bhs .L128_or_more_neon
996 bhs .L64_or_more_neon
999 vst1.8 {$a0-$b0},[sp]
1001 vst1.8 {$c0-$d0},[@t[0]]
1006 vld1.8 {$t0-$t1},[r12]!
1007 vld1.8 {$t2-$t3},[r12]!
1012 vst1.8 {$a0-$b0},[r14]!
1013 vst1.8 {$c0-$d0},[r14]!
1018 vst1.8 {$a1-$b1},[sp]
1020 vst1.8 {$c1-$d1},[@t[0]]
1021 sub @t[3],@t[3],#64*1 @ len-=64*1
1026 vld1.8 {$t0-$t1},[r12]!
1027 vld1.8 {$t2-$t3},[r12]!
1030 vld1.8 {$t0-$t1},[r12]!
1033 vld1.8 {$t2-$t3},[r12]!
1037 vst1.8 {$a0-$b0},[r14]!
1039 vst1.8 {$c0-$d0},[r14]!
1041 vst1.8 {$a1-$b1},[r14]!
1042 vst1.8 {$c1-$d1},[r14]!
1047 vst1.8 {$a2-$b2},[sp]
1049 vst1.8 {$c2-$d2},[@t[0]]
1050 sub @t[3],@t[3],#64*2 @ len-=64*2
1055 vld1.8 {$t0-$t1},[r12]!
1056 vld1.8 {$t2-$t3},[r12]!
1059 vld1.8 {$t0-$t1},[r12]!
1062 vld1.8 {$t2-$t3},[r12]!
1066 vld1.8 {$t0-$t1},[r12]!
1068 vst1.8 {$a0-$b0},[r14]!
1070 vld1.8 {$t2-$t3},[r12]!
1073 vst1.8 {$c0-$d0},[r14]!
1075 vst1.8 {$a1-$b1},[r14]!
1077 vst1.8 {$c1-$d1},[r14]!
1079 vst1.8 {$a2-$b2},[r14]!
1080 vst1.8 {$c2-$d2},[r14]!
1084 ldmia sp,{@t[0]-@t[3]} @ load key material
1085 add @x[0],@x[0],@t[0] @ accumulate key material
1087 add @x[1],@x[1],@t[1]
1088 add @x[2],@x[2],@t[2]
1089 add @x[3],@x[3],@t[3]
1090 ldmia @t[0],{@t[0]-@t[3]} @ load key material
1092 add @x[4],@x[4],@t[0] @ accumulate key material
1094 add @x[5],@x[5],@t[1]
1095 add @x[6],@x[6],@t[2]
1096 add @x[7],@x[7],@t[3]
1097 ldmia @t[0],{@t[0]-@t[3]} @ load key material
1108 stmia sp,{@x[0]-@x[7]}
1109 add @x[0],sp,#4*(16+8)
1111 ldmia @x[0],{@x[0]-@x[7]} @ load second half
1113 add @x[0],@x[0],@t[0] @ accumulate key material
1114 add @t[0],sp,#4*(12)
1115 add @x[1],@x[1],@t[1]
1116 add @x[2],@x[2],@t[2]
1117 add @x[3],@x[3],@t[3]
1118 ldmia @t[0],{@t[0]-@t[3]} @ load key material
1120 add @x[4],@x[4],@t[0] @ accumulate key material
1122 add @x[5],@x[5],@t[1]
1123 add @x[4],@x[4],#3 @ counter+3
1124 add @x[6],@x[6],@t[2]
1125 add @x[7],@x[7],@t[3]
1126 ldr @t[3],[sp,#4*(32+2)] @ re-load len
1137 stmia @t[0],{@x[0]-@x[7]}
1139 sub @t[3],@t[3],#64*3 @ len-=64*3
1142 ldrb @t[0],[@t[2]],#1 @ read buffer on stack
1143 ldrb @t[1],[r12],#1 @ read input
1145 eor @t[0],@t[0],@t[1]
1146 strb @t[0],[r14],#1 @ store output
1153 ldmia sp!,{r4-r11,pc}
1154 .size ChaCha20_neon,.-ChaCha20_neon
1155 .comm OPENSSL_armcap_P,4,4
1160 foreach (split("\n",$code)) {
1161 s/\`([^\`]*)\`/eval $1/geo;
1163 s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;