# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# ChaCha20 for PowerPC/AltiVec.
# Performance in cycles per byte out of large buffer.
# IALU/gcc-4.x 3xAltiVec+1xIALU
# Freescale e300 13.6/+115% -
# PPC74x0/G4e 6.81/+310% 4.66
# PPC970/G5 9.29/+160% 4.60
# POWER7 8.62/+61% 4.27
# POWER8 8.70/+51% 3.96
# POWER9 6.61/+29% 3.67
# Pick ABI-dependent assembler parameters ($SIZE_T, $STU/$PUSH/$POP,
# $LRSAVE, $UCMP, $LOCALS, ...) from the build "flavour" argument.
# The bodies of the 64-/32-bit branches are elided in this excerpt.
34 if ($flavour =~ /64/) {
41 } elsif ($flavour =~ /32/) {
48 } else { die "nonsense $flavour"; }
# Flavours ending in "le" are little-endian (e.g. linux64le).
50 $LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
# Locate the ppc-xlate.pl translator next to this script or in the
# shared perlasm directory, then pipe all generated text through it.
52 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
53 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
54 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
55 die "can't locate ppc-xlate.pl";
57 open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
# Frame layout: locals + 64 bytes of scratch + save area for the 18
# nonvolatile GPRs (r14-r31) pushed in the prologue below.
60 $FRAME=$LOCALS+64+18*$SIZE_T; # 64 is for local variables
# Catch-all "assembler": any call to an undefined sub, e.g. &add(...),
# is appended to $code as one instruction line "add\targs".  Underscores
# in the name become dots (for dotted mnemonics such as "subfe.").
# NOTE(review): the closing brace of this sub is elided in this excerpt.
62 sub AUTOLOAD() # thunk [simplified] x86-style perlasm
63 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
64 $code .= "\t$opcode\t".join(',',@_)."\n";
# Argument registers per the PPC calling convention: r3..r7.
69 my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
# The 16 ChaCha state words live in r16-r31; @d holds counter/nonce
# copies; @t are temporaries (they alias r7-r10, i.e. the incoming
# arguments -- presumably reused after the args are consumed; the
# elided code should be checked to confirm).
71 my @x=map("r$_",(16..31));
72 my @d=map("r$_",(11,12,14,15));
73 my @t=map("r$_",(7..10));
# Start of sub ROUND (its "sub ROUND {" header is elided).  From one
# column quadruple ($a0,$b0,$c0,$d0) derive the other three by rotating
# the index within each row of four, yielding four independent
# quarter-rounds that can be interleaved for instruction-level
# parallelism.
76 my ($a0,$b0,$c0,$d0)=@_;
77 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
78 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
79 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
# Four ChaCha quarter-rounds, interleaved instruction-by-instruction.
# Phase 1: a += b; d ^= a; d <<<= 16
82 "&add (@x[$a0],@x[$a0],@x[$b0])",
83 "&add (@x[$a1],@x[$a1],@x[$b1])",
84 "&add (@x[$a2],@x[$a2],@x[$b2])",
85 "&add (@x[$a3],@x[$a3],@x[$b3])",
86 "&xor (@x[$d0],@x[$d0],@x[$a0])",
87 "&xor (@x[$d1],@x[$d1],@x[$a1])",
88 "&xor (@x[$d2],@x[$d2],@x[$a2])",
89 "&xor (@x[$d3],@x[$d3],@x[$a3])",
90 "&rotlwi (@x[$d0],@x[$d0],16)",
91 "&rotlwi (@x[$d1],@x[$d1],16)",
92 "&rotlwi (@x[$d2],@x[$d2],16)",
93 "&rotlwi (@x[$d3],@x[$d3],16)",
# Phase 2: c += d; b ^= c; b <<<= 12
95 "&add (@x[$c0],@x[$c0],@x[$d0])",
96 "&add (@x[$c1],@x[$c1],@x[$d1])",
97 "&add (@x[$c2],@x[$c2],@x[$d2])",
98 "&add (@x[$c3],@x[$c3],@x[$d3])",
99 "&xor (@x[$b0],@x[$b0],@x[$c0])",
100 "&xor (@x[$b1],@x[$b1],@x[$c1])",
101 "&xor (@x[$b2],@x[$b2],@x[$c2])",
102 "&xor (@x[$b3],@x[$b3],@x[$c3])",
103 "&rotlwi (@x[$b0],@x[$b0],12)",
104 "&rotlwi (@x[$b1],@x[$b1],12)",
105 "&rotlwi (@x[$b2],@x[$b2],12)",
106 "&rotlwi (@x[$b3],@x[$b3],12)",
# Phase 3: a += b; d ^= a; d <<<= 8
108 "&add (@x[$a0],@x[$a0],@x[$b0])",
109 "&add (@x[$a1],@x[$a1],@x[$b1])",
110 "&add (@x[$a2],@x[$a2],@x[$b2])",
111 "&add (@x[$a3],@x[$a3],@x[$b3])",
112 "&xor (@x[$d0],@x[$d0],@x[$a0])",
113 "&xor (@x[$d1],@x[$d1],@x[$a1])",
114 "&xor (@x[$d2],@x[$d2],@x[$a2])",
115 "&xor (@x[$d3],@x[$d3],@x[$a3])",
116 "&rotlwi (@x[$d0],@x[$d0],8)",
117 "&rotlwi (@x[$d1],@x[$d1],8)",
118 "&rotlwi (@x[$d2],@x[$d2],8)",
119 "&rotlwi (@x[$d3],@x[$d3],8)",
# Phase 4: c += d; b ^= c; b <<<= 7
121 "&add (@x[$c0],@x[$c0],@x[$d0])",
122 "&add (@x[$c1],@x[$c1],@x[$d1])",
123 "&add (@x[$c2],@x[$c2],@x[$d2])",
124 "&add (@x[$c3],@x[$c3],@x[$d3])",
125 "&xor (@x[$b0],@x[$b0],@x[$c0])",
126 "&xor (@x[$b1],@x[$b1],@x[$c1])",
127 "&xor (@x[$b2],@x[$b2],@x[$c2])",
128 "&xor (@x[$b3],@x[$b3],@x[$c3])",
129 "&rotlwi (@x[$b0],@x[$b0],7)",
130 "&rotlwi (@x[$b1],@x[$b1],7)",
131 "&rotlwi (@x[$b2],@x[$b2],7)",
132 "&rotlwi (@x[$b3],@x[$b3],7)"
# Integer-only (IALU) implementation; __ChaCha20_ctr32_int is also the
# fall-back entry used by the VMX code for short inputs.  Many original
# lines (entry checks, body) are elided in this excerpt.
140 .globl .ChaCha20_ctr32_int
143 __ChaCha20_ctr32_int:
# Prologue: allocate the frame and save all nonvolatile GPRs plus LR.
147 $STU $sp,-$FRAME($sp)
150 $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
151 $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
152 $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
153 $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
154 $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
155 $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
156 $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
157 $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
158 $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
159 $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
160 $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
161 $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
162 $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
163 $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
164 $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
165 $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
166 $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
167 $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
168 $PUSH r0,`$FRAME+$LRSAVE`($sp)
# Function body (elided here): load the counter words, run the block
# function, etc.
170 lwz @d[0],0($ctr) # load counter
# Epilogue: restore LR and r14-r31, then release the frame (elided).
177 $POP r0,`$FRAME+$LRSAVE`($sp)
178 $POP r14,`$FRAME-$SIZE_T*18`($sp)
179 $POP r15,`$FRAME-$SIZE_T*17`($sp)
180 $POP r16,`$FRAME-$SIZE_T*16`($sp)
181 $POP r17,`$FRAME-$SIZE_T*15`($sp)
182 $POP r18,`$FRAME-$SIZE_T*14`($sp)
183 $POP r19,`$FRAME-$SIZE_T*13`($sp)
184 $POP r20,`$FRAME-$SIZE_T*12`($sp)
185 $POP r21,`$FRAME-$SIZE_T*11`($sp)
186 $POP r22,`$FRAME-$SIZE_T*10`($sp)
187 $POP r23,`$FRAME-$SIZE_T*9`($sp)
188 $POP r24,`$FRAME-$SIZE_T*8`($sp)
189 $POP r25,`$FRAME-$SIZE_T*7`($sp)
190 $POP r26,`$FRAME-$SIZE_T*6`($sp)
191 $POP r27,`$FRAME-$SIZE_T*5`($sp)
192 $POP r28,`$FRAME-$SIZE_T*4`($sp)
193 $POP r29,`$FRAME-$SIZE_T*3`($sp)
194 $POP r30,`$FRAME-$SIZE_T*2`($sp)
195 $POP r31,`$FRAME-$SIZE_T*1`($sp)
# Traceback bytes -- presumably AIX-style metadata (uses LR, 18 saved
# GPRs, 5 input args); consumed by ppc-xlate -- verify against it.
200 .byte 0,12,4,1,0x80,18,5,0
202 .size .ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int
# Synthesize the ChaCha "sigma" constant "expand 32-byte k"
# (0x61707865 0x3320646e 0x79622d32 0x6b206574) in @x[0..3]; the lis
# instructions for words 1..3 are elided in this excerpt.
207 lis @x[0],0x6170 # synthesize sigma
211 ori @x[0],@x[0],0x7865
212 ori @x[1],@x[1],0x646e
213 ori @x[2],@x[2],0x2d32
214 ori @x[3],@x[3],0x6574
# 10 iterations of (column round + diagonal round) = 20 rounds.
216 li r0,10 # inner loop counter
217 lwz @x[4],0($key) # load key
222 mr @x[12],@d[0] # copy counter
# Column round followed by diagonal round (indices per RFC state layout).
238 foreach (&ROUND(0, 4, 8,12)) { eval; }
239 foreach (&ROUND(0, 5,10,15)) { eval; }
# subic records the borrow of $len-64; combined with subfe. below it
# yields a 0/-1 mask and sets cr0 for the "bne Ltail" decision.
243 subic $len,$len,64 # $len-=64
# Add the original key block back in; sigma is re-added as separate
# low (addi) and high (addis) halves.
244 addi @x[0],@x[0],0x7865 # accumulate key block
245 addi @x[1],@x[1],0x646e
246 addi @x[2],@x[2],0x2d32
247 addi @x[3],@x[3],0x6574
248 addis @x[0],@x[0],0x6170
249 addis @x[1],@x[1],0x3320
250 addis @x[2],@x[2],0x7962
251 addis @x[3],@x[3],0x6b20
253 subfe. r0,r0,r0 # borrow?-1:0
254 add @x[4],@x[4],@t[0]
256 add @x[5],@x[5],@t[1]
258 add @x[6],@x[6],@t[2]
260 add @x[7],@x[7],@t[3]
262 add @x[8],@x[8],@t[0]
263 add @x[9],@x[9],@t[1]
264 add @x[10],@x[10],@t[2]
265 add @x[11],@x[11],@t[3]
267 add @x[12],@x[12],@d[0]
268 add @x[13],@x[13],@d[1]
269 add @x[14],@x[14],@d[2]
270 add @x[15],@x[15],@d[3]
271 addi @d[0],@d[0],1 # increment counter
# ChaCha output is little-endian; on big-endian targets byte-swap each
# word with the classic rotlwi+2x rlwimi sequence.
273 if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) { # flip byte order
276 rotlwi @x[$i],@x[$i],8
277 rlwimi @x[$i],@t[$i&3],24,0,7
278 rlwimi @x[$i],@t[$i&3],24,16,23
# Fewer than 64 bytes remained ($len-=64 borrowed): go emit them
# byte-by-byte.
282 bne Ltail # $len-=64 borrowed
# Full block: load input words (lwz copes with unaligned addresses),
# xor with the keystream and store; most of the interleaved loads and
# stores are elided in this excerpt.
284 lwz @t[0],0($inp) # load input, aligned or not
286 ${UCMP}i $len,0 # done already?
289 xor @x[0],@x[0],@t[0] # xor with input
291 xor @x[1],@x[1],@t[1]
293 xor @x[2],@x[2],@t[2]
295 xor @x[3],@x[3],@t[3]
297 xor @x[4],@x[4],@t[0]
299 xor @x[5],@x[5],@t[1]
301 xor @x[6],@x[6],@t[2]
303 xor @x[7],@x[7],@t[3]
305 xor @x[8],@x[8],@t[0]
307 xor @x[9],@x[9],@t[1]
309 xor @x[10],@x[10],@t[2]
311 xor @x[11],@x[11],@t[3]
313 xor @x[12],@x[12],@t[0]
314 stw @x[0],0($out) # store output, aligned or not
315 xor @x[13],@x[13],@t[1]
317 xor @x[14],@x[14],@t[2]
319 xor @x[15],@x[15],@t[3]
343 subi $inp,$inp,1 # prepare for *++ptr
345 addi @t[0],$sp,$LOCALS-1
348 stw @x[0],`$LOCALS+0`($sp) # save whole block to stack
349 stw @x[1],`$LOCALS+4`($sp)
350 stw @x[2],`$LOCALS+8`($sp)
351 stw @x[3],`$LOCALS+12`($sp)
352 stw @x[4],`$LOCALS+16`($sp)
353 stw @x[5],`$LOCALS+20`($sp)
354 stw @x[6],`$LOCALS+24`($sp)
355 stw @x[7],`$LOCALS+28`($sp)
356 stw @x[8],`$LOCALS+32`($sp)
357 stw @x[9],`$LOCALS+36`($sp)
358 stw @x[10],`$LOCALS+40`($sp)
359 stw @x[11],`$LOCALS+44`($sp)
360 stw @x[12],`$LOCALS+48`($sp)
361 stw @x[13],`$LOCALS+52`($sp)
362 stw @x[14],`$LOCALS+56`($sp)
363 stw @x[15],`$LOCALS+60`($sp)
365 Loop_tail: # byte-by-byte loop
368 xor @d[1],@d[0],@x[0]
372 stw $sp,`$LOCALS+0`($sp) # wipe block on stack
373 stw $sp,`$LOCALS+4`($sp)
374 stw $sp,`$LOCALS+8`($sp)
375 stw $sp,`$LOCALS+12`($sp)
376 stw $sp,`$LOCALS+16`($sp)
377 stw $sp,`$LOCALS+20`($sp)
378 stw $sp,`$LOCALS+24`($sp)
379 stw $sp,`$LOCALS+28`($sp)
380 stw $sp,`$LOCALS+32`($sp)
381 stw $sp,`$LOCALS+36`($sp)
382 stw $sp,`$LOCALS+40`($sp)
383 stw $sp,`$LOCALS+44`($sp)
384 stw $sp,`$LOCALS+48`($sp)
385 stw $sp,`$LOCALS+52`($sp)
386 stw $sp,`$LOCALS+56`($sp)
387 stw $sp,`$LOCALS+60`($sp)
391 .byte 0,12,0x14,0,0,0,0,0
# AltiVec/VMX code path: three 4-lane vector "threads" (A/B/C/D rows x
# 3 blocks) plus temporaries; @K holds the expanded key/counter blocks,
# and the named vectors hold rotation constants/permutations.
395 my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2) =
397 my (@K)=map("v$_",(15..20));
398 my ($FOUR,$sixteen,$twenty4,$twenty,$twelve,$twenty5,$seven) =
400 my ($inpperm,$outperm,$outmask) = map("v$_",(28..30));
401 my @D=("v31",$seven,$T0,$T1,$T2);
# Larger frame: extra 13*16 bytes to offload v20-v31 (plus vrsave).
403 my $FRAME=$LOCALS+64+13*16+18*$SIZE_T; # 13*16 is for v20-v31 offload
# Start of sub VMXROUND (its "sub VMXROUND {" header is elided): one
# vector quarter-round on whole rows.
407 my ($a,$b,$c,$d,$t)=@_;
# a += b; d ^= a; d <<<= 16 (rotate-by-16 done as a byte permute).
410 "&vadduwm ('$a','$a','$b')",
411 "&vxor ('$d','$d','$a')",
412 "&vperm ('$d','$d','$d','$sixteen')",
# c += d; b ^= c; b <<<= 12 (as srw-by-20 | slw-by-12).
414 "&vadduwm ('$c','$c','$d')",
415 "&vxor ('$t','$b','$c')",
416 "&vsrw ('$b','$t','$twenty')",
417 "&vslw ('$t','$t','$twelve')",
418 "&vor ('$b','$b','$t')",
# a += b; d ^= a; d <<<= 8 (byte permute again).
420 "&vadduwm ('$a','$a','$b')",
421 "&vxor ('$d','$d','$a')",
422 "&vperm ('$d','$d','$d','$twenty4')",
# c += d; b ^= c; b <<<= 7 (as srw-by-25 | slw-by-7).
424 "&vadduwm ('$c','$c','$d')",
425 "&vxor ('$t','$b','$c')",
426 "&vsrw ('$b','$t','$twenty5')",
427 "&vslw ('$t','$t','$seven')",
428 "&vor ('$b','$b','$t')",
# Rotate the b/c/d rows so the next call operates on columns vs
# diagonals; $odd reverses the b/d shift direction.
430 "&vsldoi ('$c','$c','$c',8)",
431 "&vsldoi ('$b','$b','$b',$odd?4:12)",
432 "&vsldoi ('$d','$d','$d',$odd?12:4)"
# VMX entry: short inputs (length check elided) fall back to the
# integer-only implementation.
438 .globl .ChaCha20_ctr32_vmx
442 blt __ChaCha20_ctr32_int
444 $STU $sp,-$FRAME($sp)
# r10/r11 index the v20-v31 offload area (the stvx stores are elided).
446 li r10,`15+$LOCALS+64`
447 li r11,`31+$LOCALS+64`
# Save vrsave, the nonvolatile GPRs and LR, then claim all vector
# registers via vrsave (SPR 256).
471 stw r12,`$FRAME-$SIZE_T*18-4`($sp) # save vrsave
472 $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
473 $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
474 $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
475 $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
476 $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
477 $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
478 $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
479 $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
480 $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
481 $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
482 $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
483 $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
484 $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
485 $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
486 $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
487 $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
488 $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
489 $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
491 $PUSH r0, `$FRAME+$LRSAVE`($sp)
492 mtspr 256,r12 # preserve all AltiVec registers
# PC-relative addressing of the constant table below.
494 bl Lconsts # returns pointer Lsigma in r12
# 31/15 reach the last byte of the (possibly unaligned) 32-byte key /
# 16-byte counter for the lvx+lvsr unaligned-load idiom.
499 li @x[4],31 # 31 is not a typo
500 li @x[5],15 # nor is 15
502 lvx @K[1],0,$key # load key
503 ?lvsr $T0,0,$key # prepare unaligned load
507 lvx @K[3],0,$ctr # load counter
508 ?lvsr $T1,0,$ctr # prepare unaligned load
511 lvx @K[0],0,r12 # load constants
512 lvx @K[5],@x[0],r12 # one
514 lvx $sixteen,@x[2],r12
515 lvx $twenty4,@x[3],r12
# Merge the two halves of each unaligned load into aligned vectors.
517 ?vperm @K[1],@K[2],@K[1],$T0 # align key
518 ?vperm @K[2],@D[0],@K[2],$T0
519 ?vperm @K[3],@D[1],@K[3],$T1 # align counter
521 lwz @d[0],0($ctr) # load counter to GPR
# Three consecutive block counters for the three vector threads.
523 vadduwm @K[3],@K[3],@K[5] # adjust AltiVec counter
525 vadduwm @K[4],@K[3],@K[5]
527 vadduwm @K[5],@K[4],@K[5]
# vspltisw immediates are limited to -16..15; shift counts use only the
# low 5 bits, so -12 acts as 20 (and similarly for 25 below, elided).
529 vspltisw $twenty,-12 # synthesize constants
532 #vspltisw $seven,7 # synthesized in the loop
# Build permutations/mask for unaligned input loads and output stores.
534 vxor $T0,$T0,$T0 # 0x00..00
535 vspltisw $outmask,-1 # 0xff..ff
536 ?lvsr $inpperm,0,$inp # prepare for unaligned load
537 ?lvsl $outperm,0,$out # prepare for unaligned store
538 ?vperm $outmask,$outmask,$T0,$outperm
# "be?"-prefixed ops are endian-conditional (resolved by the output
# filter at the bottom of the file): adjust byte order within words.
540 be?lvsl $T0,0,@x[0] # 0x00..0f
541 be?vspltisb $T1,3 # 0x03..03
542 be?vxor $T0,$T0,$T1 # swap bytes within words
543 be?vxor $outperm,$outperm,$T1
544 be?vperm $inpperm,$inpperm,$inpperm,$T0
# Fourth, scalar "thread": rebuild sigma/key/counter state in GPRs,
# exactly as in the integer-only path.
550 lis @x[0],0x6170 # synthesize sigma
556 ori @x[0],@x[0],0x7865
557 ori @x[1],@x[1],0x646e
559 ori @x[2],@x[2],0x2d32
560 ori @x[3],@x[3],0x6574
563 li r0,10 # inner loop counter
564 lwz @x[4],0($key) # load key to GPR
574 mr @x[12],@d[0] # copy GPR counter
# Generate one double-round: three vector quarter-rounds plus one
# scalar ROUND, then emit them interleaved one instruction at a time
# (the scalar thread gets one slot per vector slot) to fill both the
# vector and integer pipelines.
595 my @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,0);
596 my @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,0);
597 my @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,0);
598 my @thread3=&ROUND(0,4,8,12);
601 eval; eval(shift(@thread3));
602 eval(shift(@thread1)); eval(shift(@thread3));
603 eval(shift(@thread2)); eval(shift(@thread3));
# Same again for the "odd" (diagonal) half of the double round.
606 @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,1);
607 @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,1);
608 @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,1);
609 @thread3=&ROUND(0,5,10,15);
612 eval; eval(shift(@thread3));
613 eval(shift(@thread1)); eval(shift(@thread3));
614 eval(shift(@thread2)); eval(shift(@thread3));
# Each iteration produced 4 blocks (3 vector + 1 scalar) = 256 bytes.
619 subi $len,$len,256 # $len-=256
# Scalar thread: add the original key block back (sigma split into
# addi/addis halves, as in the integer path).
620 addi @x[0],@x[0],0x7865 # accumulate key block
621 addi @x[1],@x[1],0x646e
622 addi @x[2],@x[2],0x2d32
623 addi @x[3],@x[3],0x6574
624 addis @x[0],@x[0],0x6170
625 addis @x[1],@x[1],0x3320
626 addis @x[2],@x[2],0x7962
627 addis @x[3],@x[3],0x6b20
628 add @x[4],@x[4],@t[0]
630 add @x[5],@x[5],@t[1]
632 add @x[6],@x[6],@t[2]
634 add @x[7],@x[7],@t[3]
636 add @x[8],@x[8],@t[0]
637 add @x[9],@x[9],@t[1]
638 add @x[10],@x[10],@t[2]
639 add @x[11],@x[11],@t[3]
640 add @x[12],@x[12],@d[0]
641 add @x[13],@x[13],@d[1]
642 add @x[14],@x[14],@d[2]
643 add @x[15],@x[15],@d[3]
# Vector threads: add the key schedule rows; D rows get their own
# per-thread counters @K[3..5].
645 vadduwm $A0,$A0,@K[0] # accumulate key block
646 vadduwm $A1,$A1,@K[0]
647 vadduwm $A2,$A2,@K[0]
648 vadduwm $B0,$B0,@K[1]
649 vadduwm $B1,$B1,@K[1]
650 vadduwm $B2,$B2,@K[1]
651 vadduwm $C0,$C0,@K[2]
652 vadduwm $C1,$C1,@K[2]
653 vadduwm $C2,$C2,@K[2]
654 vadduwm $D0,$D0,@K[3]
655 vadduwm $D1,$D1,@K[4]
656 vadduwm $D2,$D2,@K[5]
# Advance counters by 4 blocks: GPR copy and all three vector counters.
658 addi @d[0],@d[0],4 # increment counter
659 vadduwm @K[3],@K[3],$FOUR
660 vadduwm @K[4],@K[4],$FOUR
661 vadduwm @K[5],@K[5],$FOUR
673 lwz @t[0],0($inp) # load input, aligned or not
677 xor @x[0],@x[0],@t[0] # xor with input
679 xor @x[1],@x[1],@t[1]
681 xor @x[2],@x[2],@t[2]
683 xor @x[3],@x[3],@t[3]
685 xor @x[4],@x[4],@t[0]
687 xor @x[5],@x[5],@t[1]
689 xor @x[6],@x[6],@t[2]
691 xor @x[7],@x[7],@t[3]
693 xor @x[8],@x[8],@t[0]
695 xor @x[9],@x[9],@t[1]
697 xor @x[10],@x[10],@t[2]
699 xor @x[11],@x[11],@t[3]
701 xor @x[12],@x[12],@t[0]
702 stw @x[0],0($out) # store output, aligned or not
703 xor @x[13],@x[13],@t[1]
705 xor @x[14],@x[14],@t[2]
707 xor @x[15],@x[15],@t[3]
728 lvx @D[0],0,$inp # load input
735 ?vperm @D[0],@D[1],@D[0],$inpperm # align input
736 ?vperm @D[1],@D[2],@D[1],$inpperm
737 ?vperm @D[2],@D[3],@D[2],$inpperm
738 ?vperm @D[3],@D[4],@D[3],$inpperm
739 vxor $A0,$A0,@D[0] # xor with input
741 lvx @D[1],@t[0],$inp # keep loading input
748 li @t[3],63 # 63 is not a typo
749 vperm $A0,$A0,$A0,$outperm # pre-misalign output
750 vperm $B0,$B0,$B0,$outperm
751 vperm $C0,$C0,$C0,$outperm
752 vperm $D0,$D0,$D0,$outperm
754 ?vperm @D[4],@D[1],@D[4],$inpperm # align input
755 ?vperm @D[1],@D[2],@D[1],$inpperm
756 ?vperm @D[2],@D[3],@D[2],$inpperm
757 ?vperm @D[3],@D[0],@D[3],$inpperm
760 lvx @D[1],@t[0],$inp # keep loading input
765 lvx @D[4],@t[3],$inp # redundant in aligned case
767 vperm $A1,$A1,$A1,$outperm # pre-misalign output
768 vperm $B1,$B1,$B1,$outperm
769 vperm $C1,$C1,$C1,$outperm
770 vperm $D1,$D1,$D1,$outperm
772 ?vperm @D[0],@D[1],@D[0],$inpperm # align input
773 ?vperm @D[1],@D[2],@D[1],$inpperm
774 ?vperm @D[2],@D[3],@D[2],$inpperm
775 ?vperm @D[3],@D[4],@D[3],$inpperm
780 vperm $A2,$A2,$A2,$outperm # pre-misalign output
781 vperm $B2,$B2,$B2,$outperm
782 vperm $C2,$C2,$C2,$outperm
783 vperm $D2,$D2,$D2,$outperm
# Unaligned-store path: when $out is misaligned, merge each pair of
# adjacent pre-misaligned vectors with vsel so interior stvx stores are
# exact; the two edge fragments are written byte-by-byte below.
785 andi. @x[1],$out,15 # is $out aligned?
788 vsel @D[0],$A0,$B0,$outmask # collect pre-misaligned output
789 vsel @D[1],$B0,$C0,$outmask
790 vsel @D[2],$C0,$D0,$outmask
791 vsel @D[3],$D0,$A1,$outmask
792 vsel $B0,$A1,$B1,$outmask
793 vsel $C0,$B1,$C1,$outmask
794 vsel $D0,$C1,$D1,$outmask
795 vsel $A1,$D1,$A2,$outmask
796 vsel $B1,$A2,$B2,$outmask
797 vsel $C1,$B2,$C2,$outmask
798 vsel $D1,$C2,$D2,$outmask
800 #stvx $A0,0,$out # take it easy on the edges
801 stvx @D[0],@t[0],$out # store output
802 stvx @D[1],@t[1],$out
803 stvx @D[2],@t[2],$out
# Edge bytes of the misaligned region, one at a time via stvebx.
818 sub @x[2],$out,@x[1] # in misaligned case edges
819 li @x[3],0 # are written byte-by-byte
821 stvebx $D2,@x[3],@x[2]
824 bne Lunaligned_tail_vmx
826 sub @x[2],@x[0],@x[1]
828 stvebx $A0,@x[1],@x[2]
831 bne Lunaligned_head_vmx
# Keep iterating while at least another 256-byte batch remains.
833 ${UCMP}i $len,255 # done with 256-byte blocks yet?
# Aligned path: the head hexaword skipped above is stored directly.
840 stvx $A0,0,@x[0] # head hexaword was not stored
842 ${UCMP}i $len,255 # done with 256-byte blocks yet?
847 ${UCMP}i $len,0 # done yet?
# Wind down: restore vrsave (and, elided, reload v20-v31 via r10/r11).
850 lwz r12,`$FRAME-$SIZE_T*18-4`($sp) # pull vrsave
851 li r10,`15+$LOCALS+64`
852 li r11,`31+$LOCALS+64`
853 mtspr 256,r12 # restore vrsave
877 $POP r14,`$FRAME-$SIZE_T*18`($sp)
878 $POP r15,`$FRAME-$SIZE_T*17`($sp)
879 $POP r16,`$FRAME-$SIZE_T*16`($sp)
880 $POP r17,`$FRAME-$SIZE_T*15`($sp)
881 $POP r18,`$FRAME-$SIZE_T*14`($sp)
882 $POP r19,`$FRAME-$SIZE_T*13`($sp)
883 $POP r20,`$FRAME-$SIZE_T*12`($sp)
884 $POP r21,`$FRAME-$SIZE_T*11`($sp)
885 $POP r22,`$FRAME-$SIZE_T*10`($sp)
886 $POP r23,`$FRAME-$SIZE_T*9`($sp)
887 $POP r24,`$FRAME-$SIZE_T*8`($sp)
888 $POP r25,`$FRAME-$SIZE_T*7`($sp)
889 $POP r26,`$FRAME-$SIZE_T*6`($sp)
890 $POP r27,`$FRAME-$SIZE_T*5`($sp)
891 $POP r28,`$FRAME-$SIZE_T*4`($sp)
892 $POP r29,`$FRAME-$SIZE_T*3`($sp)
893 $POP r30,`$FRAME-$SIZE_T*2`($sp)
894 $POP r31,`$FRAME-$SIZE_T*1`($sp)
899 .byte 0,12,0x04,1,0x80,18,5,0
901 .size .ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
907 mflr r12 #vvvvv "distance between . and _vpaes_consts
912 .byte 0,12,0x14,0,0,0,0,0
915 .long 0x61707865,0x3320646e,0x79622d32,0x6b206574
919 $code.=<<___ if ($LITTLE_ENDIAN);
920 .long 0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
921 .long 0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
923 $code.=<<___ if (!$LITTLE_ENDIAN); # flipped words
924 .long 0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
925 .long 0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
928 .asciz "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
# Final filter over the generated text: evaluate `...` arithmetic and
# resolve the '?'-prefixed endian-specific instructions.  The loop body
# continues past this excerpt (little-endian branch and print elided).
933 foreach (split("\n",$code)) {
934 s/\`([^\`]*)\`/eval $1/ge;
936 # instructions prefixed with '?' are endian-specific and need
937 # to be adjusted accordingly...
938 if ($flavour !~ /le$/) { # big-endian
# Swap the two source operands of ?vperm, or mirror ?vsldoi's shift
# (16-$4), to get equivalent big-endian semantics.
943 s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
944 s/(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/;
945 } else { # little-endian