PPC assembly pack: add ChaCha20 and Poly1305 modules.
author     Andy Polyakov <appro@openssl.org>
           Wed, 10 Feb 2016 10:51:23 +0000 (11:51 +0100)
committer  Andy Polyakov <appro@openssl.org>
           Sat, 13 Feb 2016 16:21:47 +0000 (17:21 +0100)
Reviewed-by: Richard Levitte <levitte@openssl.org>
crypto/chacha/Makefile.in
crypto/chacha/asm/chacha-ppc.pl [new file with mode: 0755]
crypto/chacha/build.info
crypto/poly1305/Makefile.in
crypto/poly1305/asm/poly1305-ppc.pl [new file with mode: 0755]
crypto/poly1305/asm/poly1305-ppcfp.pl [new file with mode: 0755]
crypto/poly1305/build.info

diff --git a/crypto/chacha/Makefile.in b/crypto/chacha/Makefile.in
index dd0f36c..08bfdb5 100644
@@ -40,6 +40,8 @@ chacha-x86.s:         asm/chacha-x86.pl
        $(PERL) asm/chacha-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
 chacha-x86_64.s:       asm/chacha-x86_64.pl
        $(PERL) asm/chacha-x86_64.pl $(PERLASM_SCHEME) > $@
+chacha-ppc.s:  asm/chacha-ppc.pl
+       $(PERL) asm/chacha-ppc.pl $(PERLASM_SCHEME) $@
 
 chacha-%.S:    asm/chacha-%.pl;        $(PERL) $< $(PERLASM_SCHEME) $@
 
diff --git a/crypto/chacha/asm/chacha-ppc.pl b/crypto/chacha/asm/chacha-ppc.pl
new file mode 100755
index 0000000..23f7fad
--- /dev/null
@@ -0,0 +1,942 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# October 2015
+# 
+# ChaCha20 for PowerPC/AltiVec.
+#
+# Performance in cycles per byte out of a large buffer.
+#
+#                      IALU/gcc-4.x    3xAltiVec+1xIALU
+#
+# Freescale e300       13.6/+115%      -
+# PPC74x0              6.81/+310%      4.66
+# POWER7               8.62/+61%       4.27
+# POWER8               8.70/+51%       3.96
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+       $SIZE_T =8;
+       $LRSAVE =2*$SIZE_T;
+       $STU    ="stdu";
+       $POP    ="ld";
+       $PUSH   ="std";
+       $UCMP   ="cmpld";
+} elsif ($flavour =~ /32/) {
+       $SIZE_T =4;
+       $LRSAVE =$SIZE_T;
+       $STU    ="stwu";
+       $POP    ="lwz";
+       $PUSH   ="stw";
+       $UCMP   ="cmplw";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
+
+$LOCALS=6*$SIZE_T;
+$FRAME=$LOCALS+64+18*$SIZE_T;  # 64 is for local variables
+
+sub AUTOLOAD()         # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+    $code .= "\t$opcode\t".join(',',@_)."\n";
+}
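+# (e.g. an otherwise undefined call like &vadduwm("v0","v0","v1") lands
+# in AUTOLOAD and is emitted verbatim as "\tvadduwm\tv0,v0,v1\n")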
+
+my $sp = "r1";
+
+my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
+
+my @x=map("r$_",(16..31));
+my @d=map("r$_",(11,12,14,15));
+my @t=map("r$_",(7..10));
+
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+
+    (
+       "&add           (@x[$a0],@x[$a0],@x[$b0])",
+        "&add          (@x[$a1],@x[$a1],@x[$b1])",
+         "&add         (@x[$a2],@x[$a2],@x[$b2])",
+          "&add        (@x[$a3],@x[$a3],@x[$b3])",
+       "&xor           (@x[$d0],@x[$d0],@x[$a0])",
+        "&xor          (@x[$d1],@x[$d1],@x[$a1])",
+         "&xor         (@x[$d2],@x[$d2],@x[$a2])",
+          "&xor        (@x[$d3],@x[$d3],@x[$a3])",
+       "&rotlwi        (@x[$d0],@x[$d0],16)",
+        "&rotlwi       (@x[$d1],@x[$d1],16)",
+         "&rotlwi      (@x[$d2],@x[$d2],16)",
+          "&rotlwi     (@x[$d3],@x[$d3],16)",
+
+       "&add           (@x[$c0],@x[$c0],@x[$d0])",
+        "&add          (@x[$c1],@x[$c1],@x[$d1])",
+         "&add         (@x[$c2],@x[$c2],@x[$d2])",
+          "&add        (@x[$c3],@x[$c3],@x[$d3])",
+       "&xor           (@x[$b0],@x[$b0],@x[$c0])",
+        "&xor          (@x[$b1],@x[$b1],@x[$c1])",
+         "&xor         (@x[$b2],@x[$b2],@x[$c2])",
+          "&xor        (@x[$b3],@x[$b3],@x[$c3])",
+       "&rotlwi        (@x[$b0],@x[$b0],12)",
+        "&rotlwi       (@x[$b1],@x[$b1],12)",
+         "&rotlwi      (@x[$b2],@x[$b2],12)",
+          "&rotlwi     (@x[$b3],@x[$b3],12)",
+
+       "&add           (@x[$a0],@x[$a0],@x[$b0])",
+        "&add          (@x[$a1],@x[$a1],@x[$b1])",
+         "&add         (@x[$a2],@x[$a2],@x[$b2])",
+          "&add        (@x[$a3],@x[$a3],@x[$b3])",
+       "&xor           (@x[$d0],@x[$d0],@x[$a0])",
+        "&xor          (@x[$d1],@x[$d1],@x[$a1])",
+         "&xor         (@x[$d2],@x[$d2],@x[$a2])",
+          "&xor        (@x[$d3],@x[$d3],@x[$a3])",
+       "&rotlwi        (@x[$d0],@x[$d0],8)",
+        "&rotlwi       (@x[$d1],@x[$d1],8)",
+         "&rotlwi      (@x[$d2],@x[$d2],8)",
+          "&rotlwi     (@x[$d3],@x[$d3],8)",
+
+       "&add           (@x[$c0],@x[$c0],@x[$d0])",
+        "&add          (@x[$c1],@x[$c1],@x[$d1])",
+         "&add         (@x[$c2],@x[$c2],@x[$d2])",
+          "&add        (@x[$c3],@x[$c3],@x[$d3])",
+       "&xor           (@x[$b0],@x[$b0],@x[$c0])",
+        "&xor          (@x[$b1],@x[$b1],@x[$c1])",
+         "&xor         (@x[$b2],@x[$b2],@x[$c2])",
+          "&xor        (@x[$b3],@x[$b3],@x[$c3])",
+       "&rotlwi        (@x[$b0],@x[$b0],7)",
+        "&rotlwi       (@x[$b1],@x[$b1],7)",
+         "&rotlwi      (@x[$b2],@x[$b2],7)",
+          "&rotlwi     (@x[$b3],@x[$b3],7)"
+    );
+}
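+
+# For reference, a scalar sketch of the quarter-round that ROUND() above
+# emits four-wide: the generator staggers four independent quarter-rounds
+# so the in-order integer pipelines always have work. (Hypothetical C
+# reference, not part of the generated code.)
+#
+#      #include <stdint.h>
+#
+#      #define ROTL32(v,n) (((v)<<(n)) | ((v)>>(32-(n))))
+#
+#      static void quarterround(uint32_t x[16], int a, int b, int c, int d)
+#      {
+#          x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d],16);
+#          x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b],12);
+#          x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 8);
+#          x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 7);
+#      }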
+
+$code.=<<___;
+.machine       "any"
+
+.globl .ChaCha20_ctr32_int
+.align 5
+.ChaCha20_ctr32_int:
+__ChaCha20_ctr32_int:
+       ${UCMP}i $len,0
+       beqlr-
+
+       $STU    $sp,-$FRAME($sp)
+       mflr    r0
+
+       $PUSH   r14,`$FRAME-$SIZE_T*18`($sp)
+       $PUSH   r15,`$FRAME-$SIZE_T*17`($sp)
+       $PUSH   r16,`$FRAME-$SIZE_T*16`($sp)
+       $PUSH   r17,`$FRAME-$SIZE_T*15`($sp)
+       $PUSH   r18,`$FRAME-$SIZE_T*14`($sp)
+       $PUSH   r19,`$FRAME-$SIZE_T*13`($sp)
+       $PUSH   r20,`$FRAME-$SIZE_T*12`($sp)
+       $PUSH   r21,`$FRAME-$SIZE_T*11`($sp)
+       $PUSH   r22,`$FRAME-$SIZE_T*10`($sp)
+       $PUSH   r23,`$FRAME-$SIZE_T*9`($sp)
+       $PUSH   r24,`$FRAME-$SIZE_T*8`($sp)
+       $PUSH   r25,`$FRAME-$SIZE_T*7`($sp)
+       $PUSH   r26,`$FRAME-$SIZE_T*6`($sp)
+       $PUSH   r27,`$FRAME-$SIZE_T*5`($sp)
+       $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
+       $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
+       $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
+       $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
+       $PUSH   r0,`$FRAME+$LRSAVE`($sp)
+
+       lwz     @d[0],0($ctr)                   # load counter
+       lwz     @d[1],4($ctr)
+       lwz     @d[2],8($ctr)
+       lwz     @d[3],12($ctr)
+
+       bl      __ChaCha20_1x
+
+       $POP    r0,`$FRAME+$LRSAVE`($sp)
+       $POP    r14,`$FRAME-$SIZE_T*18`($sp)
+       $POP    r15,`$FRAME-$SIZE_T*17`($sp)
+       $POP    r16,`$FRAME-$SIZE_T*16`($sp)
+       $POP    r17,`$FRAME-$SIZE_T*15`($sp)
+       $POP    r18,`$FRAME-$SIZE_T*14`($sp)
+       $POP    r19,`$FRAME-$SIZE_T*13`($sp)
+       $POP    r20,`$FRAME-$SIZE_T*12`($sp)
+       $POP    r21,`$FRAME-$SIZE_T*11`($sp)
+       $POP    r22,`$FRAME-$SIZE_T*10`($sp)
+       $POP    r23,`$FRAME-$SIZE_T*9`($sp)
+       $POP    r24,`$FRAME-$SIZE_T*8`($sp)
+       $POP    r25,`$FRAME-$SIZE_T*7`($sp)
+       $POP    r26,`$FRAME-$SIZE_T*6`($sp)
+       $POP    r27,`$FRAME-$SIZE_T*5`($sp)
+       $POP    r28,`$FRAME-$SIZE_T*4`($sp)
+       $POP    r29,`$FRAME-$SIZE_T*3`($sp)
+       $POP    r30,`$FRAME-$SIZE_T*2`($sp)
+       $POP    r31,`$FRAME-$SIZE_T*1`($sp)
+       mtlr    r0
+       addi    $sp,$sp,$FRAME
+       blr
+       .long   0
+       .byte   0,12,4,1,0x80,18,5,0
+       .long   0
+.size  .ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int
+
+.align 5
+__ChaCha20_1x:
+Loop_outer:
+       lis     @x[0],0x6170                    # synthesize sigma
+       lis     @x[1],0x3320
+       lis     @x[2],0x7962
+       lis     @x[3],0x6b20
+       ori     @x[0],@x[0],0x7865
+       ori     @x[1],@x[1],0x646e
+       ori     @x[2],@x[2],0x2d32
+       ori     @x[3],@x[3],0x6574
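+       # the lis/ori pairs above synthesize sigma, the "expand 32-byte k"
+       # constant: 0x61707865,0x3320646e,0x79622d32,0x6b206574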
+
+       li      r0,10                           # inner loop counter
+       lwz     @x[4],0($key)                   # load key
+       lwz     @x[5],4($key)
+       lwz     @x[6],8($key)
+       lwz     @x[7],12($key)
+       lwz     @x[8],16($key)
+       mr      @x[12],@d[0]                    # copy counter
+       lwz     @x[9],20($key)
+       mr      @x[13],@d[1]
+       lwz     @x[10],24($key)
+       mr      @x[14],@d[2]
+       lwz     @x[11],28($key)
+       mr      @x[15],@d[3]
+
+       mr      @t[0],@x[4]
+       mr      @t[1],@x[5]
+       mr      @t[2],@x[6]
+       mr      @t[3],@x[7]
+
+       mtctr   r0
+Loop:
+___
+       foreach (&ROUND(0, 4, 8,12)) { eval; }
+       foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+       bdnz    Loop
+
+       subic   $len,$len,64                    # $len-=64
+       addi    @x[0],@x[0],0x7865              # accumulate key block
+       addi    @x[1],@x[1],0x646e
+       addi    @x[2],@x[2],0x2d32
+       addi    @x[3],@x[3],0x6574
+       addis   @x[0],@x[0],0x6170
+       addis   @x[1],@x[1],0x3320
+       addis   @x[2],@x[2],0x7962
+       addis   @x[3],@x[3],0x6b20
+
+       subfe.  r0,r0,r0                        # borrow?-1:0
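+       # subfe. yields r0 = CA-1: all ones if the subic above borrowed
+       # ($len was <64), zero otherwise; its record form sets CR0 for
+       # the "bne Ltail" below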
+       add     @x[4],@x[4],@t[0]
+       lwz     @t[0],16($key)
+       add     @x[5],@x[5],@t[1]
+       lwz     @t[1],20($key)
+       add     @x[6],@x[6],@t[2]
+       lwz     @t[2],24($key)
+       add     @x[7],@x[7],@t[3]
+       lwz     @t[3],28($key)
+       add     @x[8],@x[8],@t[0]
+       add     @x[9],@x[9],@t[1]
+       add     @x[10],@x[10],@t[2]
+       add     @x[11],@x[11],@t[3]
+
+       add     @x[12],@x[12],@d[0]
+       add     @x[13],@x[13],@d[1]
+       add     @x[14],@x[14],@d[2]
+       add     @x[15],@x[15],@d[3]
+       addi    @d[0],@d[0],1                   # increment counter
+___
+if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {  # flip byte order
+$code.=<<___;
+       mr      @t[$i&3],@x[$i]
+       rotlwi  @x[$i],@x[$i],8
+       rlwimi  @x[$i],@t[$i&3],24,0,7
+       rlwimi  @x[$i],@t[$i&3],24,16,23
+___
+} }
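+# the rotlwi/rlwimi triplet above is a 32-bit byte swap: rotate left by
+# 8, then overwrite bytes 0 and 2 with the original rotated left by 24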
+$code.=<<___;
+       bne     Ltail                           # $len-=64 borrowed
+
+       lwz     @t[0],0($inp)                   # load input, aligned or not
+       lwz     @t[1],4($inp)
+       ${UCMP}i $len,0                         # done already?
+       lwz     @t[2],8($inp)
+       lwz     @t[3],12($inp)
+       xor     @x[0],@x[0],@t[0]               # xor with input
+       lwz     @t[0],16($inp)
+       xor     @x[1],@x[1],@t[1]
+       lwz     @t[1],20($inp)
+       xor     @x[2],@x[2],@t[2]
+       lwz     @t[2],24($inp)
+       xor     @x[3],@x[3],@t[3]
+       lwz     @t[3],28($inp)
+       xor     @x[4],@x[4],@t[0]
+       lwz     @t[0],32($inp)
+       xor     @x[5],@x[5],@t[1]
+       lwz     @t[1],36($inp)
+       xor     @x[6],@x[6],@t[2]
+       lwz     @t[2],40($inp)
+       xor     @x[7],@x[7],@t[3]
+       lwz     @t[3],44($inp)
+       xor     @x[8],@x[8],@t[0]
+       lwz     @t[0],48($inp)
+       xor     @x[9],@x[9],@t[1]
+       lwz     @t[1],52($inp)
+       xor     @x[10],@x[10],@t[2]
+       lwz     @t[2],56($inp)
+       xor     @x[11],@x[11],@t[3]
+       lwz     @t[3],60($inp)
+       xor     @x[12],@x[12],@t[0]
+       stw     @x[0],0($out)                   # store output, aligned or not
+       xor     @x[13],@x[13],@t[1]
+       stw     @x[1],4($out)
+       xor     @x[14],@x[14],@t[2]
+       stw     @x[2],8($out)
+       xor     @x[15],@x[15],@t[3]
+       stw     @x[3],12($out)
+       stw     @x[4],16($out)
+       stw     @x[5],20($out)
+       stw     @x[6],24($out)
+       stw     @x[7],28($out)
+       stw     @x[8],32($out)
+       stw     @x[9],36($out)
+       stw     @x[10],40($out)
+       stw     @x[11],44($out)
+       stw     @x[12],48($out)
+       stw     @x[13],52($out)
+       stw     @x[14],56($out)
+       addi    $inp,$inp,64
+       stw     @x[15],60($out)
+       addi    $out,$out,64
+
+       bne     Loop_outer
+
+       blr
+
+.align 4
+Ltail:
+       addi    $len,$len,64                    # restore tail length
+       subi    $inp,$inp,1                     # prepare for *++ptr
+       subi    $out,$out,1
+       addi    @t[0],$sp,$LOCALS-1
+       mtctr   $len
+
+       stw     @x[0],`$LOCALS+0`($sp)          # save whole block to stack
+       stw     @x[1],`$LOCALS+4`($sp)
+       stw     @x[2],`$LOCALS+8`($sp)
+       stw     @x[3],`$LOCALS+12`($sp)
+       stw     @x[4],`$LOCALS+16`($sp)
+       stw     @x[5],`$LOCALS+20`($sp)
+       stw     @x[6],`$LOCALS+24`($sp)
+       stw     @x[7],`$LOCALS+28`($sp)
+       stw     @x[8],`$LOCALS+32`($sp)
+       stw     @x[9],`$LOCALS+36`($sp)
+       stw     @x[10],`$LOCALS+40`($sp)
+       stw     @x[11],`$LOCALS+44`($sp)
+       stw     @x[12],`$LOCALS+48`($sp)
+       stw     @x[13],`$LOCALS+52`($sp)
+       stw     @x[14],`$LOCALS+56`($sp)
+       stw     @x[15],`$LOCALS+60`($sp)
+
+Loop_tail:                                     # byte-by-byte loop
+       lbzu    @d[0],1($inp)
+       lbzu    @x[0],1(@t[0])
+       xor     @d[1],@d[0],@x[0]
+       stbu    @d[1],1($out)
+       bdnz    Loop_tail
+
+       stw     $sp,`$LOCALS+0`($sp)            # wipe block on stack
+       stw     $sp,`$LOCALS+4`($sp)
+       stw     $sp,`$LOCALS+8`($sp)
+       stw     $sp,`$LOCALS+12`($sp)
+       stw     $sp,`$LOCALS+16`($sp)
+       stw     $sp,`$LOCALS+20`($sp)
+       stw     $sp,`$LOCALS+24`($sp)
+       stw     $sp,`$LOCALS+28`($sp)
+       stw     $sp,`$LOCALS+32`($sp)
+       stw     $sp,`$LOCALS+36`($sp)
+       stw     $sp,`$LOCALS+40`($sp)
+       stw     $sp,`$LOCALS+44`($sp)
+       stw     $sp,`$LOCALS+48`($sp)
+       stw     $sp,`$LOCALS+52`($sp)
+       stw     $sp,`$LOCALS+56`($sp)
+       stw     $sp,`$LOCALS+60`($sp)
+
+       blr
+       .long   0
+       .byte   0,12,0x14,0,0,0,0,0
+___
+
+{{{
+my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2) =
+    map("v$_",(0..14));
+my (@K)=map("v$_",(15..20));
+my ($FOUR,$sixteen,$twenty4,$twenty,$twelve,$twenty5,$seven) =
+    map("v$_",(21..27));
+my ($inpperm,$outperm,$outmask) = map("v$_",(28..30));
+my @D=("v31",$seven,$T0,$T1,$T2);
+
+my $FRAME=$LOCALS+64+13*16+18*$SIZE_T; # 13*16 is for v20-v31 offload
+
+sub VMXROUND {
+my $odd = pop;
+my ($a,$b,$c,$d,$t)=@_;
+
+       (
+       "&vadduwm       ('$a','$a','$b')",
+       "&vxor          ('$d','$d','$a')",
+       "&vperm         ('$d','$d','$d','$sixteen')",
+
+       "&vadduwm       ('$c','$c','$d')",
+       "&vxor          ('$t','$b','$c')",
+       "&vsrw          ('$b','$t','$twenty')",
+       "&vslw          ('$t','$t','$twelve')",
+       "&vor           ('$b','$b','$t')",
+
+       "&vadduwm       ('$a','$a','$b')",
+       "&vxor          ('$d','$d','$a')",
+       "&vperm         ('$d','$d','$d','$twenty4')",
+
+       "&vadduwm       ('$c','$c','$d')",
+       "&vxor          ('$t','$b','$c')",
+       "&vsrw          ('$b','$t','$twenty5')",
+       "&vslw          ('$t','$t','$seven')",
+       "&vor           ('$b','$b','$t')",
+
+       "&vsldoi        ('$c','$c','$c',8)",
+       "&vsldoi        ('$b','$b','$b',$odd?4:12)",
+       "&vsldoi        ('$d','$d','$d',$odd?12:4)"
+       );
+}
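+# Rotations by 16 and 24 are byte-aligned and done with vperm (tables
+# loaded from past Lsigma), while 12 and 7 take a vsrw/vslw/vor triplet.
+# The trailing vsldoi instructions rotate the b/c/d lanes so column and
+# diagonal rounds share the same code; $odd picks the direction.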
+
+$code.=<<___;
+
+.globl .ChaCha20_ctr32_vmx
+.align 5
+.ChaCha20_ctr32_vmx:
+       ${UCMP}i $len,256
+       blt     __ChaCha20_ctr32_int
+
+       $STU    $sp,-$FRAME($sp)
+       mflr    r0
+       li      r10,`15+$LOCALS+64`
+       li      r11,`31+$LOCALS+64`
+       mfspr   r12,256
+       stvx    v20,r10,$sp
+       addi    r10,r10,32
+       stvx    v21,r11,$sp
+       addi    r11,r11,32
+       stvx    v22,r10,$sp
+       addi    r10,r10,32
+       stvx    v23,r11,$sp
+       addi    r11,r11,32
+       stvx    v24,r10,$sp
+       addi    r10,r10,32
+       stvx    v25,r11,$sp
+       addi    r11,r11,32
+       stvx    v26,r10,$sp
+       addi    r10,r10,32
+       stvx    v27,r11,$sp
+       addi    r11,r11,32
+       stvx    v28,r10,$sp
+       addi    r10,r10,32
+       stvx    v29,r11,$sp
+       addi    r11,r11,32
+       stvx    v30,r10,$sp
+       stvx    v31,r11,$sp
+       stw     r12,`$FRAME-$SIZE_T*18-4`($sp)  # save vrsave
+       $PUSH   r14,`$FRAME-$SIZE_T*18`($sp)
+       $PUSH   r15,`$FRAME-$SIZE_T*17`($sp)
+       $PUSH   r16,`$FRAME-$SIZE_T*16`($sp)
+       $PUSH   r17,`$FRAME-$SIZE_T*15`($sp)
+       $PUSH   r18,`$FRAME-$SIZE_T*14`($sp)
+       $PUSH   r19,`$FRAME-$SIZE_T*13`($sp)
+       $PUSH   r20,`$FRAME-$SIZE_T*12`($sp)
+       $PUSH   r21,`$FRAME-$SIZE_T*11`($sp)
+       $PUSH   r22,`$FRAME-$SIZE_T*10`($sp)
+       $PUSH   r23,`$FRAME-$SIZE_T*9`($sp)
+       $PUSH   r24,`$FRAME-$SIZE_T*8`($sp)
+       $PUSH   r25,`$FRAME-$SIZE_T*7`($sp)
+       $PUSH   r26,`$FRAME-$SIZE_T*6`($sp)
+       $PUSH   r27,`$FRAME-$SIZE_T*5`($sp)
+       $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
+       $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
+       $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
+       $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
+       li      12,-1
+       $PUSH   r0, `$FRAME+$LRSAVE`($sp)
+       mtspr   256,r12                         # preserve all AltiVec registers
+
+       bl      Lconsts                         # returns pointer Lsigma in r12
+       li      @x[0],16
+       li      @x[1],32
+       li      @x[2],48
+       li      @x[3],64
+       li      @x[4],31                        # 31 is not a typo
+       li      @x[5],15                        # nor is 15
+
+       lvx     @K[1],0,$key                    # load key
+       ?lvsr   $T0,0,$key                      # prepare unaligned load
+       lvx     @K[2],@x[0],$key
+       lvx     @D[0],@x[4],$key
+
+       lvx     @K[3],0,$ctr                    # load counter
+       ?lvsr   $T1,0,$ctr                      # prepare unaligned load
+       lvx     @D[1],@x[5],$ctr
+
+       lvx     @K[0],0,r12                     # load constants
+       lvx     @K[5],@x[0],r12                 # one
+       lvx     $FOUR,@x[1],r12
+       lvx     $sixteen,@x[2],r12
+       lvx     $twenty4,@x[3],r12
+
+       ?vperm  @K[1],@K[2],@K[1],$T0           # align key
+       ?vperm  @K[2],@D[0],@K[2],$T0
+       ?vperm  @K[3],@D[1],@K[3],$T1           # align counter
+
+       lwz     @d[0],0($ctr)                   # load counter to GPR
+       lwz     @d[1],4($ctr)
+       vadduwm @K[3],@K[3],@K[5]               # adjust AltiVec counter
+       lwz     @d[2],8($ctr)
+       vadduwm @K[4],@K[3],@K[5]
+       lwz     @d[3],12($ctr)
+       vadduwm @K[5],@K[4],@K[5]
+
+       vspltisw $twenty,-12                    # synthesize constants 
+       vspltisw $twelve,12
+       vspltisw $twenty5,-7
+       #vspltisw $seven,7                      # synthesized in the loop
+
+       vxor    $T0,$T0,$T0                     # 0x00..00
+       vspltisw $outmask,-1                    # 0xff..ff
+       ?lvsr   $inpperm,0,$inp                 # prepare for unaligned load
+       ?lvsl   $outperm,0,$out                 # prepare for unaligned store
+       ?vperm  $outmask,$outmask,$T0,$outperm
+
+       be?vspltisb $T1,3                       # 0x03..03
+       be?vxor $inpperm,$inpperm,$T1           # swap bytes within words
+       be?vxor $outperm,$outperm,$T1
+
+       b       Loop_outer_vmx
+
+.align 4
+Loop_outer_vmx:
+       lis     @x[0],0x6170                    # synthesize sigma
+       lis     @x[1],0x3320
+        vmr    $A0,@K[0]
+       lis     @x[2],0x7962
+       lis     @x[3],0x6b20
+        vmr    $A1,@K[0]
+       ori     @x[0],@x[0],0x7865
+       ori     @x[1],@x[1],0x646e
+        vmr    $A2,@K[0]
+       ori     @x[2],@x[2],0x2d32
+       ori     @x[3],@x[3],0x6574
+        vmr    $B0,@K[1]
+
+       li      r0,10                           # inner loop counter
+       lwz     @x[4],0($key)                   # load key to GPR
+        vmr    $B1,@K[1]
+       lwz     @x[5],4($key)
+        vmr    $B2,@K[1]
+       lwz     @x[6],8($key)
+        vmr    $C0,@K[2]
+       lwz     @x[7],12($key)
+        vmr    $C1,@K[2]
+       lwz     @x[8],16($key)
+        vmr    $C2,@K[2]
+       mr      @x[12],@d[0]                    # copy GPR counter
+       lwz     @x[9],20($key)
+        vmr    $D0,@K[3]
+       mr      @x[13],@d[1]
+       lwz     @x[10],24($key)
+        vmr    $D1,@K[4]
+       mr      @x[14],@d[2]
+       lwz     @x[11],28($key)
+        vmr    $D2,@K[5]
+       mr      @x[15],@d[3]
+
+       mr      @t[0],@x[4]
+       mr      @t[1],@x[5]
+       mr      @t[2],@x[6]
+       mr      @t[3],@x[7]
+       vspltisw $seven,7
+
+       mtctr   r0
+       nop
+Loop_vmx:
+___
+       my @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,0);
+       my @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,0);
+       my @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,0);
+       my @thread3=&ROUND(0,4,8,12);
+
+       foreach (@thread0) {
+               eval;                   eval(shift(@thread3));
+               eval(shift(@thread1));  eval(shift(@thread3));
+               eval(shift(@thread2));  eval(shift(@thread3));
+       }
+
+       @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,1);
+       @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,1);
+       @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,1);
+       @thread3=&ROUND(0,5,10,15);
+
+       foreach (@thread0) {
+               eval;                   eval(shift(@thread3));
+               eval(shift(@thread1));  eval(shift(@thread3));
+               eval(shift(@thread2));  eval(shift(@thread3));
+       }
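+       # Three AltiVec "threads" are interleaved with the integer
+       # quarter-rounds from ROUND(): vector and integer instructions
+       # dispatch to different units, so the 4th, integer-computed block
+       # comes almost for free; hence the 3xAltiVec+1xIALU column in the
+       # table at the top.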
+$code.=<<___;
+       bdnz    Loop_vmx
+
+       subi    $len,$len,256                   # $len-=256
+       addi    @x[0],@x[0],0x7865              # accumulate key block
+       addi    @x[1],@x[1],0x646e
+       addi    @x[2],@x[2],0x2d32
+       addi    @x[3],@x[3],0x6574
+       addis   @x[0],@x[0],0x6170
+       addis   @x[1],@x[1],0x3320
+       addis   @x[2],@x[2],0x7962
+       addis   @x[3],@x[3],0x6b20
+       add     @x[4],@x[4],@t[0]
+       lwz     @t[0],16($key)
+       add     @x[5],@x[5],@t[1]
+       lwz     @t[1],20($key)
+       add     @x[6],@x[6],@t[2]
+       lwz     @t[2],24($key)
+       add     @x[7],@x[7],@t[3]
+       lwz     @t[3],28($key)
+       add     @x[8],@x[8],@t[0]
+       add     @x[9],@x[9],@t[1]
+       add     @x[10],@x[10],@t[2]
+       add     @x[11],@x[11],@t[3]
+       add     @x[12],@x[12],@d[0]
+       add     @x[13],@x[13],@d[1]
+       add     @x[14],@x[14],@d[2]
+       add     @x[15],@x[15],@d[3]
+
+       vadduwm $A0,$A0,@K[0]                   # accumulate key block
+       vadduwm $A1,$A1,@K[0]
+       vadduwm $A2,$A2,@K[0]
+       vadduwm $B0,$B0,@K[1]
+       vadduwm $B1,$B1,@K[1]
+       vadduwm $B2,$B2,@K[1]
+       vadduwm $C0,$C0,@K[2]
+       vadduwm $C1,$C1,@K[2]
+       vadduwm $C2,$C2,@K[2]
+       vadduwm $D0,$D0,@K[3]
+       vadduwm $D1,$D1,@K[4]
+       vadduwm $D2,$D2,@K[5]
+
+       addi    @d[0],@d[0],4                   # increment counter
+       vadduwm @K[3],@K[3],$FOUR
+       vadduwm @K[4],@K[4],$FOUR
+       vadduwm @K[5],@K[5],$FOUR
+
+___
+if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {  # flip byte order
+$code.=<<___;
+       mr      @t[$i&3],@x[$i]
+       rotlwi  @x[$i],@x[$i],8
+       rlwimi  @x[$i],@t[$i&3],24,0,7
+       rlwimi  @x[$i],@t[$i&3],24,16,23
+___
+} }
+$code.=<<___;
+       lwz     @t[0],0($inp)                   # load input, aligned or not
+       lwz     @t[1],4($inp)
+       lwz     @t[2],8($inp)
+       lwz     @t[3],12($inp)
+       xor     @x[0],@x[0],@t[0]               # xor with input
+       lwz     @t[0],16($inp)
+       xor     @x[1],@x[1],@t[1]
+       lwz     @t[1],20($inp)
+       xor     @x[2],@x[2],@t[2]
+       lwz     @t[2],24($inp)
+       xor     @x[3],@x[3],@t[3]
+       lwz     @t[3],28($inp)
+       xor     @x[4],@x[4],@t[0]
+       lwz     @t[0],32($inp)
+       xor     @x[5],@x[5],@t[1]
+       lwz     @t[1],36($inp)
+       xor     @x[6],@x[6],@t[2]
+       lwz     @t[2],40($inp)
+       xor     @x[7],@x[7],@t[3]
+       lwz     @t[3],44($inp)
+       xor     @x[8],@x[8],@t[0]
+       lwz     @t[0],48($inp)
+       xor     @x[9],@x[9],@t[1]
+       lwz     @t[1],52($inp)
+       xor     @x[10],@x[10],@t[2]
+       lwz     @t[2],56($inp)
+       xor     @x[11],@x[11],@t[3]
+       lwz     @t[3],60($inp)
+       xor     @x[12],@x[12],@t[0]
+       stw     @x[0],0($out)                   # store output, aligned or not
+       xor     @x[13],@x[13],@t[1]
+       stw     @x[1],4($out)
+       xor     @x[14],@x[14],@t[2]
+       stw     @x[2],8($out)
+       xor     @x[15],@x[15],@t[3]
+       stw     @x[3],12($out)
+       addi    $inp,$inp,64
+       stw     @x[4],16($out)
+       li      @t[0],16
+       stw     @x[5],20($out)
+       li      @t[1],32
+       stw     @x[6],24($out)
+       li      @t[2],48
+       stw     @x[7],28($out)
+       li      @t[3],64
+       stw     @x[8],32($out)
+       stw     @x[9],36($out)
+       stw     @x[10],40($out)
+       stw     @x[11],44($out)
+       stw     @x[12],48($out)
+       stw     @x[13],52($out)
+       stw     @x[14],56($out)
+       stw     @x[15],60($out)
+       addi    $out,$out,64
+
+       lvx     @D[0],0,$inp                    # load input
+       lvx     @D[1],@t[0],$inp
+       lvx     @D[2],@t[1],$inp
+       lvx     @D[3],@t[2],$inp
+       lvx     @D[4],@t[3],$inp
+       addi    $inp,$inp,64
+
+       ?vperm  @D[0],@D[1],@D[0],$inpperm      # align input
+       ?vperm  @D[1],@D[2],@D[1],$inpperm
+       ?vperm  @D[2],@D[3],@D[2],$inpperm
+       ?vperm  @D[3],@D[4],@D[3],$inpperm
+       vxor    $A0,$A0,@D[0]                   # xor with input
+       vxor    $B0,$B0,@D[1]
+       lvx     @D[1],@t[0],$inp                # keep loading input
+       vxor    $C0,$C0,@D[2]
+       lvx     @D[2],@t[1],$inp
+       vxor    $D0,$D0,@D[3]
+       lvx     @D[3],@t[2],$inp
+       lvx     @D[0],@t[3],$inp
+       addi    $inp,$inp,64
+       li      @t[3],63                        # 63 is not a typo
+       vperm   $A0,$A0,$A0,$outperm            # pre-misalign output
+       vperm   $B0,$B0,$B0,$outperm
+       vperm   $C0,$C0,$C0,$outperm
+       vperm   $D0,$D0,$D0,$outperm
+
+       ?vperm  @D[4],@D[1],@D[4],$inpperm      # align input
+       ?vperm  @D[1],@D[2],@D[1],$inpperm
+       ?vperm  @D[2],@D[3],@D[2],$inpperm
+       ?vperm  @D[3],@D[0],@D[3],$inpperm
+       vxor    $A1,$A1,@D[4]
+       vxor    $B1,$B1,@D[1]
+       lvx     @D[1],@t[0],$inp                # keep loading input
+       vxor    $C1,$C1,@D[2]
+       lvx     @D[2],@t[1],$inp
+       vxor    $D1,$D1,@D[3]
+       lvx     @D[3],@t[2],$inp
+       lvx     @D[4],@t[3],$inp                # redundant in aligned case
+       addi    $inp,$inp,64
+       vperm   $A1,$A1,$A1,$outperm            # pre-misalign output
+       vperm   $B1,$B1,$B1,$outperm
+       vperm   $C1,$C1,$C1,$outperm
+       vperm   $D1,$D1,$D1,$outperm
+
+       ?vperm  @D[0],@D[1],@D[0],$inpperm      # align input
+       ?vperm  @D[1],@D[2],@D[1],$inpperm
+       ?vperm  @D[2],@D[3],@D[2],$inpperm
+       ?vperm  @D[3],@D[4],@D[3],$inpperm
+       vxor    $A2,$A2,@D[0]
+       vxor    $B2,$B2,@D[1]
+       vxor    $C2,$C2,@D[2]
+       vxor    $D2,$D2,@D[3]
+       vperm   $A2,$A2,$A2,$outperm            # pre-misalign output
+       vperm   $B2,$B2,$B2,$outperm
+       vperm   $C2,$C2,$C2,$outperm
+       vperm   $D2,$D2,$D2,$outperm
+
+       andi.   @x[1],$out,15                   # is $out aligned?
+       mr      @x[0],$out
+
+       vsel    @D[0],$A0,$B0,$outmask          # collect pre-misaligned output
+       vsel    @D[1],$B0,$C0,$outmask
+       vsel    @D[2],$C0,$D0,$outmask
+       vsel    @D[3],$D0,$A1,$outmask
+       vsel    $B0,$A1,$B1,$outmask
+       vsel    $C0,$B1,$C1,$outmask
+       vsel    $D0,$C1,$D1,$outmask
+       vsel    $A1,$D1,$A2,$outmask
+       vsel    $B1,$A2,$B2,$outmask
+       vsel    $C1,$B2,$C2,$outmask
+       vsel    $D1,$C2,$D2,$outmask
+
+       #stvx   $A0,0,$out                      # take it easy on the edges
+       stvx    @D[0],@t[0],$out                # store output
+       stvx    @D[1],@t[1],$out
+       stvx    @D[2],@t[2],$out
+       addi    $out,$out,64
+       stvx    @D[3],0,$out
+       stvx    $B0,@t[0],$out
+       stvx    $C0,@t[1],$out
+       stvx    $D0,@t[2],$out
+       addi    $out,$out,64
+       stvx    $A1,0,$out
+       stvx    $B1,@t[0],$out
+       stvx    $C1,@t[1],$out
+       stvx    $D1,@t[2],$out
+       addi    $out,$out,64
+
+       beq     Laligned_vmx
+
+       sub     @x[2],$out,@x[1]                # in misaligned case edges
+       li      @x[3],0                         # are written byte-by-byte
+Lunaligned_tail_vmx:
+       stvebx  $D2,@x[3],@x[2]
+       addi    @x[3],@x[3],1
+       cmpw    @x[3],@x[1]
+       bne     Lunaligned_tail_vmx
+
+       sub     @x[2],@x[0],@x[1]
+Lunaligned_head_vmx:
+       stvebx  $A0,@x[1],@x[2]
+       cmpwi   @x[1],15
+       addi    @x[1],@x[1],1
+       bne     Lunaligned_head_vmx
+
+       ${UCMP}i $len,255                       # done with 256-byte blocks yet?
+       bgt     Loop_outer_vmx
+
+       b       Ldone_vmx
+
+.align 4
+Laligned_vmx:
+       stvx    $A0,0,@x[0]                     # head hexaword was not stored
+
+       ${UCMP}i $len,255                       # done with 256-byte blocks yet?
+       bgt     Loop_outer_vmx
+       nop
+
+Ldone_vmx:
+       ${UCMP}i $len,0                         # done yet?
+       bnel    __ChaCha20_1x
+
+       lwz     r12,`$FRAME-$SIZE_T*18-4`($sp)  # pull vrsave
+       li      r10,`15+$LOCALS+64`
+       li      r11,`31+$LOCALS+64`
+       mtspr   256,r12                         # restore vrsave
+       lvx     v20,r10,$sp
+       addi    r10,r10,32
+       lvx     v21,r11,$sp
+       addi    r11,r11,32
+       lvx     v22,r10,$sp
+       addi    r10,r10,32
+       lvx     v23,r11,$sp
+       addi    r11,r11,32
+       lvx     v24,r10,$sp
+       addi    r10,r10,32
+       lvx     v25,r11,$sp
+       addi    r11,r11,32
+       lvx     v26,r10,$sp
+       addi    r10,r10,32
+       lvx     v27,r11,$sp
+       addi    r11,r11,32
+       lvx     v28,r10,$sp
+       addi    r10,r10,32
+       lvx     v29,r11,$sp
+       addi    r11,r11,32
+       lvx     v30,r10,$sp
+       lvx     v31,r11,$sp
+       $POP    r0, `$FRAME+$LRSAVE`($sp)
+       $POP    r14,`$FRAME-$SIZE_T*18`($sp)
+       $POP    r15,`$FRAME-$SIZE_T*17`($sp)
+       $POP    r16,`$FRAME-$SIZE_T*16`($sp)
+       $POP    r17,`$FRAME-$SIZE_T*15`($sp)
+       $POP    r18,`$FRAME-$SIZE_T*14`($sp)
+       $POP    r19,`$FRAME-$SIZE_T*13`($sp)
+       $POP    r20,`$FRAME-$SIZE_T*12`($sp)
+       $POP    r21,`$FRAME-$SIZE_T*11`($sp)
+       $POP    r22,`$FRAME-$SIZE_T*10`($sp)
+       $POP    r23,`$FRAME-$SIZE_T*9`($sp)
+       $POP    r24,`$FRAME-$SIZE_T*8`($sp)
+       $POP    r25,`$FRAME-$SIZE_T*7`($sp)
+       $POP    r26,`$FRAME-$SIZE_T*6`($sp)
+       $POP    r27,`$FRAME-$SIZE_T*5`($sp)
+       $POP    r28,`$FRAME-$SIZE_T*4`($sp)
+       $POP    r29,`$FRAME-$SIZE_T*3`($sp)
+       $POP    r30,`$FRAME-$SIZE_T*2`($sp)
+       $POP    r31,`$FRAME-$SIZE_T*1`($sp)
+       mtlr    r0
+       addi    $sp,$sp,$FRAME
+       blr
+       .long   0
+       .byte   0,12,0x04,1,0x80,18,5,0
+       .long   0
+.size  .ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
+
+.align 5
+Lconsts:
+       mflr    r0
+       bcl     20,31,\$+4
+       mflr    r12                     # r12 = address of bcl above + 4
+       addi    r12,r12,`64-8`          # advance r12 to Lsigma (Lconsts+64)
+       mtlr    r0
+       blr
+       .long   0
+       .byte   0,12,0x14,0,0,0,0,0
+       .space  `64-9*4`
+Lsigma:
+       .long   0x61707865,0x3320646e,0x79622d32,0x6b206574
+       .long   1,0,0,0
+       .long   4,0,0,0
+___
+$code.=<<___   if ($LITTLE_ENDIAN);
+       .long   0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
+       .long   0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);   # flipped words
+       .long   0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
+       .long   0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
+___
+$code.=<<___;
+.asciz  "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+}}}
+
+foreach (split("\n",$code)) {
+       s/\`([^\`]*)\`/eval $1/ge;
+
+       # instructions prefixed with '?' are endian-specific and need
+       # to be adjusted accordingly...
+       if ($flavour !~ /le$/) {        # big-endian
+           s/be\?//            or
+           s/le\?/#le#/        or
+           s/\?lvsr/lvsl/      or
+           s/\?lvsl/lvsr/      or
+           s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
+           s/(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/;
+       } else {                        # little-endian
+           s/le\?//            or
+           s/be\?/#be#/        or
+           s/\?([a-z]+)/$1/;
+       }
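+       # e.g. on big-endian "?lvsr" assembles as lvsl and the 2nd/3rd
+       # arguments of "?vperm" are swapped, so one source serves both
+       # byte orders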
+
+       print $_,"\n";
+}
+
+close STDOUT;
diff --git a/crypto/chacha/build.info b/crypto/chacha/build.info
index c56c624..3ae640f 100644
@@ -8,6 +8,8 @@ BEGINRAW[Makefile(unix)]
        $(PERL) {- $sourcedir -}/asm/chacha-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
 {- $builddir -}/chacha-x86_64.s:       {- $sourcedir -}/asm/chacha-x86_64.pl
        $(PERL) {- $sourcedir -}/asm/chacha-x86_64.pl $(PERLASM_SCHEME) > $@
+{- $builddir -}/chacha-ppc.s:  {- $sourcedir -}/asm/chacha-ppc.pl
+       $(PERL) {- $sourcedir -}/asm/chacha-ppc.pl $(PERLASM_SCHEME) $@
 
 {- $builddir -}/chacha-%.S:    {- $sourcedir -}/asm/chacha-%.pl
        $(PERL) $< $(PERLASM_SCHEME) $@
diff --git a/crypto/poly1305/Makefile.in b/crypto/poly1305/Makefile.in
index 0984a52..d027eed 100644
@@ -42,6 +42,10 @@ poly1305-x86.s:              asm/poly1305-x86.pl
        $(PERL) asm/poly1305-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
 poly1305-x86_64.s:     asm/poly1305-x86_64.pl
        $(PERL) asm/poly1305-x86_64.pl $(PERLASM_SCHEME) > $@
+poly1305-ppc.s:                asm/poly1305-ppc.pl
+       $(PERL) asm/poly1305-ppc.pl $(PERLASM_SCHEME) $@
+poly1305-ppcfp.s:      asm/poly1305-ppcfp.pl
+       $(PERL) asm/poly1305-ppcfp.pl $(PERLASM_SCHEME) $@
 
 poly1305-%.S:  asm/poly1305-%.pl;      $(PERL) $< $(PERLASM_SCHEME) $@
 
diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
new file mode 100755
index 0000000..46130c9
--- /dev/null
@@ -0,0 +1,636 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements Poly1305 hash for PowerPC.
+#
+# June 2015
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone,
+# and improvement coefficients relative to gcc-generated code.
+#
+#                      -m32            -m64
+#
+# Freescale e300       14.8/+80%       -
+# PPC74x0              7.40/+60%       -
+# PPC970               7.20/+114%      3.51/+205%
+# POWER6               3.96/+250%      2.02/+170%
+# POWER7               3.67/+260%      1.87/+100%
+# POWER8               -               2.13/+200%
+#
+# Do we need a floating-point implementation for PPC? Results presented
+# in poly1305_ieee754.c are tricky to compare to, because they are for
+# compiler-generated code. On the other hand it's known that floating-
+# point performance can be dominated by FPU latency, which means that
+# there is a limit even for ideally optimized (and even vectorized)
+# code. And this limit is estimated to be higher than the above -m64
+# results. In other words, a floating-point implementation is worth
+# considering only in a 32-bit application context. We probably have to
+# recognize that 32-bit builds are getting less popular on high-end
+# systems and therefore tend to target embedded ones, which might not
+# even have an FPU...
+#
+# On a side note, Power ISA 2.07 enables a vector base 2^26
+# implementation, and POWER8 might have the capacity to break the
+# 1.0 cycle per byte barrier...
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+       $SIZE_T =8;
+       $LRSAVE =2*$SIZE_T;
+       $UCMP   ="cmpld";
+       $STU    ="stdu";
+       $POP    ="ld";
+       $PUSH   ="std";
+} elsif ($flavour =~ /32/) {
+       $SIZE_T =4;
+       $LRSAVE =$SIZE_T;
+       $UCMP   ="cmplw";
+       $STU    ="stwu";
+       $POP    ="lwz";
+       $PUSH   ="stw";
+} else { die "nonsense $flavour"; }
+
+# Define endianness based on flavour, e.g. "linux64le"
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
+
+$FRAME=24*$SIZE_T;
+
+$sp="r1";
+my ($ctx,$inp,$len,$padbit) = map("r$_",(3..6));
+my ($mac,$nonce)=($inp,$len);
+my $mask = "r0";
+
+$code=<<___;
+.machine       "any"
+.text
+___
+                                                       if ($flavour =~ /64/) {
+###############################################################################
+# base 2^64 implementation
+
+my ($h0,$h1,$h2,$d0,$d1,$d2, $r0,$r1,$s1, $t0,$t1) = map("r$_",(7..12,27..31));
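+
+# Schematically, each Loop iteration below computes, with h2 only a few
+# bits wide and s1 = r1 + (r1>>2) = 5*r1/4:
+#
+#      h += m (plus padbit at 2^128)
+#      d0:d1  = h0*r0 + h1*s1
+#      d1:d2 += h0*r1 + h1*r0
+#      d1    += h2*s1;  d2 += h2*r0
+#      h0:h1:h2 = d0:d1:d2 partially reduced mod 2^130-5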
+
+$code.=<<___;
+.globl .poly1305_init_int
+.align 4
+.poly1305_init_int:
+       xor     r0,r0,r0
+       std     r0,0($ctx)              # zero hash value
+       std     r0,8($ctx)
+       std     r0,16($ctx)
+
+       $UCMP   $inp,r0
+       beq-    Lno_key
+___
+$code.=<<___   if ($LITTLE_ENDIAN);
+       ld      $d0,0($inp)             # load key material
+       ld      $d1,8($inp)
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       li      $h0,4
+       lwbrx   $d0,0,$inp              # load key material
+       li      $d1,8
+       lwbrx   $h0,$h0,$inp
+       li      $h1,12
+       lwbrx   $d1,$d1,$inp
+       lwbrx   $h1,$h1,$inp
+       insrdi  $d0,$h0,32,0
+       insrdi  $d1,$h1,32,0
+___
+$code.=<<___;
+       lis     $h1,0xfff               # 0x0fff0000
+       ori     $h1,$h1,0xfffc          # 0x0ffffffc
+       insrdi  $h1,$h1,32,0            # 0x0ffffffc0ffffffc
+       ori     $h0,$h1,3               # 0x0ffffffc0fffffff
+
+       and     $d0,$d0,$h0
+       and     $d1,$d1,$h1
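+       # the Poly1305 "clamp": r &= 0x0ffffffc0ffffffc0ffffffc0fffffff,
+       # i.e. the top 4 bits of each 32-bit word and the low 2 bits of
+       # the upper three words are cleared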
+
+       std     $d0,32($ctx)            # store key
+       std     $d1,40($ctx)
+
+Lno_key:
+       xor     r3,r3,r3
+       blr
+       .long   0
+       .byte   0,12,0x14,0,0,0,2,0
+.size  .poly1305_init_int,.-.poly1305_init_int
+
+.globl .poly1305_blocks
+.align 4
+.poly1305_blocks:
+       srdi.   $len,$len,4
+       beq-    Labort
+
+       $STU    $sp,-$FRAME($sp)
+       mflr    r0
+       $PUSH   r27,`$FRAME-$SIZE_T*5`($sp)
+       $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
+       $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
+       $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
+       $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
+       $PUSH   r0,`$FRAME+$LRSAVE`($sp)
+
+       ld      $r0,32($ctx)            # load key
+       ld      $r1,40($ctx)
+
+       ld      $h0,0($ctx)             # load hash value
+       ld      $h1,8($ctx)
+       ld      $h2,16($ctx)
+
+       srdi    $s1,$r1,2
+       mtctr   $len
+       add     $s1,$s1,$r1             # s1 = r1 + r1>>2
+       li      $mask,3
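+       # 2^130 = 5 (mod 2^130-5) and clamped r1 = 0 (mod 4), so the
+       # h1*r1 term at 2^128 folds back exactly as h1*s1 with
+       # s1 = 5*r1/4 = r1 + (r1>>2)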
+       b       Loop
+
+.align 4
+Loop:
+___
+$code.=<<___   if ($LITTLE_ENDIAN);
+       ld      $t0,0($inp)             # load input
+       ld      $t1,8($inp)
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       li      $d0,4
+       lwbrx   $t0,0,$inp              # load input
+       li      $t1,8
+       lwbrx   $d0,$d0,$inp
+       li      $d1,12
+       lwbrx   $t1,$t1,$inp
+       lwbrx   $d1,$d1,$inp
+       insrdi  $t0,$d0,32,0
+       insrdi  $t1,$d1,32,0
+___
+$code.=<<___;
+       addi    $inp,$inp,16
+
+       addc    $h0,$h0,$t0             # accumulate input
+       adde    $h1,$h1,$t1
+
+       mulld   $d0,$h0,$r0             # h0*r0
+       mulhdu  $d1,$h0,$r0
+       adde    $h2,$h2,$padbit
+
+       mulld   $t0,$h1,$s1             # h1*5*r1
+       mulhdu  $t1,$h1,$s1
+       addc    $d0,$d0,$t0
+       adde    $d1,$d1,$t1
+
+       mulld   $t0,$h0,$r1             # h0*r1
+       mulhdu  $d2,$h0,$r1
+       addc    $d1,$d1,$t0
+       addze   $d2,$d2
+
+       mulld   $t0,$h1,$r0             # h1*r0
+       mulhdu  $t1,$h1,$r0
+       addc    $d1,$d1,$t0
+       adde    $d2,$d2,$t1
+
+       mulld   $t0,$h2,$s1             # h2*5*r1
+       mulld   $t1,$h2,$r0             # h2*r0
+       addc    $d1,$d1,$t0
+       adde    $d2,$d2,$t1
+
+       andc    $t0,$d2,$mask           # final reduction step
+       and     $h2,$d2,$mask
+       srdi    $t1,$t0,2
+       add     $t0,$t0,$t1
+       addc    $h0,$d0,$t0
+       addze   $h1,$d1
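+       # the 2^130 overflow (d2>>2) is folded back multiplied by 5,
+       # t0 + (t0>>2) = 5*(d2>>2), leaving h only partially reduced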
+
+       bdnz    Loop
+
+       std     $h0,0($ctx)             # store hash value
+       std     $h1,8($ctx)
+       std     $h2,16($ctx)
+
+       $POP    r27,`$FRAME-$SIZE_T*5`($sp)
+       $POP    r28,`$FRAME-$SIZE_T*4`($sp)
+       $POP    r29,`$FRAME-$SIZE_T*3`($sp)
+       $POP    r30,`$FRAME-$SIZE_T*2`($sp)
+       $POP    r31,`$FRAME-$SIZE_T*1`($sp)
+       addi    $sp,$sp,$FRAME
+Labort:
+       blr
+       .long   0
+       .byte   0,12,4,1,0x80,5,4,0
+.size  .poly1305_blocks,.-.poly1305_blocks
+
+.globl .poly1305_emit
+.align 4
+.poly1305_emit:
+       ld      $h0,0($ctx)             # load hash
+       ld      $h1,8($ctx)
+       ld      $h2,16($ctx)
+       ld      $padbit,0($nonce)       # load nonce
+       ld      $nonce,8($nonce)
+
+       addic   $d0,$h0,5               # compare to modulus
+       addze   $d1,$h1
+       addze   $d2,$h2
+
+       srdi    $mask,$d2,2             # did it carry/borrow?
+       neg     $mask,$mask
+
+       andc    $h0,$h0,$mask
+       and     $d0,$d0,$mask
+       andc    $h1,$h1,$mask
+       and     $d1,$d1,$mask
+       or      $h0,$h0,$d0
+       or      $h1,$h1,$d1
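+       # constant-time select: if h+5 carried into bit 130 then
+       # h >= 2^130-5 and the reduced value h+5 (mod 2^130) is taken;
+       # $mask is all ones in that case and zero otherwise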
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       rotldi  $padbit,$padbit,32      # flip nonce words
+       rotldi  $nonce,$nonce,32
+___
+$code.=<<___;
+       addc    $h0,$h0,$padbit         # accumulate nonce
+       adde    $h1,$h1,$nonce
+___
+$code.=<<___   if ($LITTLE_ENDIAN);
+       std     $h0,0($mac)             # write result
+       std     $h1,8($mac)
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       extrdi  r0,$h0,32,0
+       li      $d0,4
+       stwbrx  $h0,0,$mac              # write result
+       extrdi  $h0,$h1,32,0
+       li      $d1,8
+       stwbrx  r0,$d0,$mac
+       li      $d2,12
+       stwbrx  $h1,$d1,$mac
+       stwbrx  $h0,$d2,$mac
+___
+$code.=<<___;
+       blr
+       .long   0
+       .byte   0,12,0x14,0,0,0,3,0
+.size  .poly1305_emit,.-.poly1305_emit
+___
+                                                       } else {
+###############################################################################
+# base 2^32 implementation
+
+my ($h0,$h1,$h2,$h3,$h4, $r0,$r1,$r2,$r3, $s1,$s2,$s3,
+    $t0,$t1,$t2,$t3, $D0,$D1,$D2,$D3, $d0,$d1,$d2,$d3
+   ) = map("r$_",(7..12,14..31));
+
+$code.=<<___;
+.globl .poly1305_init_int
+.align 4
+.poly1305_init_int:
+       xor     r0,r0,r0
+       stw     r0,0($ctx)              # zero hash value
+       stw     r0,4($ctx)
+       stw     r0,8($ctx)
+       stw     r0,12($ctx)
+       stw     r0,16($ctx)
+
+       $UCMP   $inp,r0
+       beq-    Lno_key
+___
+$code.=<<___   if ($LITTLE_ENDIAN);
+       lwz     $h0,0($inp)             # load key material
+       lwz     $h1,4($inp)
+       lwz     $h2,8($inp)
+       lwz     $h3,12($inp)
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       li      $h1,4
+       lwbrx   $h0,0,$inp              # load key material
+       li      $h2,8
+       lwbrx   $h1,$h1,$inp
+       li      $h3,12
+       lwbrx   $h2,$h2,$inp
+       lwbrx   $h3,$h3,$inp
+___
+$code.=<<___;
+       lis     $mask,0xf000            # 0xf0000000
+       li      $r0,-4
+       andc    $r0,$r0,$mask           # 0x0ffffffc
+
+       andc    $h0,$h0,$mask
+       and     $h1,$h1,$r0
+       and     $h2,$h2,$r0
+       and     $h3,$h3,$r0
+
+       stw     $h0,32($ctx)            # store key
+       stw     $h1,36($ctx)
+       stw     $h2,40($ctx)
+       stw     $h3,44($ctx)
+
+Lno_key:
+       xor     r3,r3,r3
+       blr
+       .long   0
+       .byte   0,12,0x14,0,0,0,2,0
+.size  .poly1305_init_int,.-.poly1305_init_int
+
+.globl .poly1305_blocks
+.align 4
+.poly1305_blocks:
+       srwi.   $len,$len,4
+       beq-    Labort
+
+       $STU    $sp,-$FRAME($sp)
+       mflr    r0
+       $PUSH   r14,`$FRAME-$SIZE_T*18`($sp)
+       $PUSH   r15,`$FRAME-$SIZE_T*17`($sp)
+       $PUSH   r16,`$FRAME-$SIZE_T*16`($sp)
+       $PUSH   r17,`$FRAME-$SIZE_T*15`($sp)
+       $PUSH   r18,`$FRAME-$SIZE_T*14`($sp)
+       $PUSH   r19,`$FRAME-$SIZE_T*13`($sp)
+       $PUSH   r20,`$FRAME-$SIZE_T*12`($sp)
+       $PUSH   r21,`$FRAME-$SIZE_T*11`($sp)
+       $PUSH   r22,`$FRAME-$SIZE_T*10`($sp)
+       $PUSH   r23,`$FRAME-$SIZE_T*9`($sp)
+       $PUSH   r24,`$FRAME-$SIZE_T*8`($sp)
+       $PUSH   r25,`$FRAME-$SIZE_T*7`($sp)
+       $PUSH   r26,`$FRAME-$SIZE_T*6`($sp)
+       $PUSH   r27,`$FRAME-$SIZE_T*5`($sp)
+       $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
+       $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
+       $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
+       $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
+       $PUSH   r0,`$FRAME+$LRSAVE`($sp)
+
+       lwz     $r0,32($ctx)            # load key
+       lwz     $r1,36($ctx)
+       lwz     $r2,40($ctx)
+       lwz     $r3,44($ctx)
+
+       lwz     $h0,0($ctx)             # load hash value
+       lwz     $h1,4($ctx)
+       lwz     $h2,8($ctx)
+       lwz     $h3,12($ctx)
+       lwz     $h4,16($ctx)
+
+       srwi    $s1,$r1,2
+       srwi    $s2,$r2,2
+       srwi    $s3,$r3,2
+       add     $s1,$s1,$r1             # si = ri + ri>>2
+       add     $s2,$s2,$r2
+       add     $s3,$s3,$r3
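+       # si = 5*ri/4 is exact because clamped ri = 0 (mod 4); it folds
+       # product terms overflowing 2^130 back, as 2^130 = 5 (mod p)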
+       mtctr   $len
+       li      $mask,3
+       b       Loop
+
+.align 4
+Loop:
+___
+$code.=<<___   if ($LITTLE_ENDIAN);
+       lwz     $d0,0($inp)             # load input
+       lwz     $d1,4($inp)
+       lwz     $d2,8($inp)
+       lwz     $d3,12($inp)
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       li      $d1,4
+       lwbrx   $d0,0,$inp              # load input
+       li      $d2,8
+       lwbrx   $d1,$d1,$inp
+       li      $d3,12
+       lwbrx   $d2,$d2,$inp
+       lwbrx   $d3,$d3,$inp
+___
+$code.=<<___;
+       addi    $inp,$inp,16
+
+       addc    $h0,$h0,$d0             # accumulate input
+       adde    $h1,$h1,$d1
+       adde    $h2,$h2,$d2
+
+       mullw   $d0,$h0,$r0             # h0*r0
+       mulhwu  $D0,$h0,$r0
+
+       mullw   $d1,$h0,$r1             # h0*r1
+       mulhwu  $D1,$h0,$r1
+
+       mullw   $d2,$h0,$r2             # h0*r2
+       mulhwu  $D2,$h0,$r2
+
+        adde   $h3,$h3,$d3
+        adde   $h4,$h4,$padbit
+
+       mullw   $d3,$h0,$r3             # h0*r3
+       mulhwu  $D3,$h0,$r3
+
+       mullw   $t0,$h1,$s3             # h1*s3
+       mulhwu  $t1,$h1,$s3
+
+       mullw   $t2,$h1,$r0             # h1*r0
+       mulhwu  $t3,$h1,$r0
+        addc   $d0,$d0,$t0
+        adde   $D0,$D0,$t1
+
+       mullw   $t0,$h1,$r1             # h1*r1
+       mulhwu  $t1,$h1,$r1
+        addc   $d1,$d1,$t2
+        adde   $D1,$D1,$t3
+
+       mullw   $t2,$h1,$r2             # h1*r2
+       mulhwu  $t3,$h1,$r2
+        addc   $d2,$d2,$t0
+        adde   $D2,$D2,$t1
+
+       mullw   $t0,$h2,$s2             # h2*s2
+       mulhwu  $t1,$h2,$s2
+        addc   $d3,$d3,$t2
+        adde   $D3,$D3,$t3
+
+       mullw   $t2,$h2,$s3             # h2*s3
+       mulhwu  $t3,$h2,$s3
+        addc   $d0,$d0,$t0
+        adde   $D0,$D0,$t1
+
+       mullw   $t0,$h2,$r0             # h2*r0
+       mulhwu  $t1,$h2,$r0
+        addc   $d1,$d1,$t2
+        adde   $D1,$D1,$t3
+
+       mullw   $t2,$h2,$r1             # h2*r1
+       mulhwu  $t3,$h2,$r1
+        addc   $d2,$d2,$t0
+        adde   $D2,$D2,$t1
+
+       mullw   $t0,$h3,$s1             # h3*s1
+       mulhwu  $t1,$h3,$s1
+        addc   $d3,$d3,$t2
+        adde   $D3,$D3,$t3
+
+       mullw   $t2,$h3,$s2             # h3*s2
+       mulhwu  $t3,$h3,$s2
+        addc   $d0,$d0,$t0
+        adde   $D0,$D0,$t1
+
+       mullw   $t0,$h3,$s3             # h3*s3
+       mulhwu  $t1,$h3,$s3
+        addc   $d1,$d1,$t2
+        adde   $D1,$D1,$t3
+
+       mullw   $t2,$h3,$r0             # h3*r0
+       mulhwu  $t3,$h3,$r0
+        addc   $d2,$d2,$t0
+        adde   $D2,$D2,$t1
+
+       mullw   $t0,$h4,$s1             # h4*s1
+        addc   $d3,$d3,$t2
+        adde   $D3,$D3,$t3
+       addc    $d1,$d1,$t0
+
+       mullw   $t1,$h4,$s2             # h4*s2
+        addze  $D1,$D1
+       addc    $d2,$d2,$t1
+       addze   $D2,$D2
+
+       mullw   $t2,$h4,$s3             # h4*s3
+       addc    $d3,$d3,$t2
+       addze   $D3,$D3
+
+       mullw   $h4,$h4,$r0             # h4*r0
+
+       addc    $h1,$d1,$D0
+       adde    $h2,$d2,$D1
+       adde    $h3,$d3,$D2
+       adde    $h4,$h4,$D3
+
+       andc    $D0,$h4,$mask           # final reduction step
+       and     $h4,$h4,$mask
+       srwi    $D1,$D0,2
+       add     $D0,$D0,$D1
+       addc    $h0,$d0,$D0
+       addze   $h1,$h1
+       addze   $h2,$h2
+       addze   $h3,$h3
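+       # same folding as in the base 2^64 code: bits at 2^130 and above
+       # are multiplied by 5 and added back at the bottom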
+
+       bdnz    Loop
+
+       stw     $h0,0($ctx)             # store hash value
+       stw     $h1,4($ctx)
+       stw     $h2,8($ctx)
+       stw     $h3,12($ctx)
+       stw     $h4,16($ctx)
+
+       $POP    r14,`$FRAME-$SIZE_T*18`($sp)
+       $POP    r15,`$FRAME-$SIZE_T*17`($sp)
+       $POP    r16,`$FRAME-$SIZE_T*16`($sp)
+       $POP    r17,`$FRAME-$SIZE_T*15`($sp)
+       $POP    r18,`$FRAME-$SIZE_T*14`($sp)
+       $POP    r19,`$FRAME-$SIZE_T*13`($sp)
+       $POP    r20,`$FRAME-$SIZE_T*12`($sp)
+       $POP    r21,`$FRAME-$SIZE_T*11`($sp)
+       $POP    r22,`$FRAME-$SIZE_T*10`($sp)
+       $POP    r23,`$FRAME-$SIZE_T*9`($sp)
+       $POP    r24,`$FRAME-$SIZE_T*8`($sp)
+       $POP    r25,`$FRAME-$SIZE_T*7`($sp)
+       $POP    r26,`$FRAME-$SIZE_T*6`($sp)
+       $POP    r27,`$FRAME-$SIZE_T*5`($sp)
+       $POP    r28,`$FRAME-$SIZE_T*4`($sp)
+       $POP    r29,`$FRAME-$SIZE_T*3`($sp)
+       $POP    r30,`$FRAME-$SIZE_T*2`($sp)
+       $POP    r31,`$FRAME-$SIZE_T*1`($sp)
+       addi    $sp,$sp,$FRAME
+Labort:
+       blr
+       .long   0
+       .byte   0,12,4,1,0x80,18,4,0
+.size  .poly1305_blocks,.-.poly1305_blocks
+
+.globl .poly1305_emit
+.align 4
+.poly1305_emit:
+       $STU    $sp,-$FRAME($sp)
+       mflr    r0
+       $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
+       $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
+       $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
+       $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
+       $PUSH   r0,`$FRAME+$LRSAVE`($sp)
+
+       lwz     $h0,0($ctx)             # load hash
+       lwz     $h1,4($ctx)
+       lwz     $h2,8($ctx)
+       lwz     $h3,12($ctx)
+       lwz     $h4,16($ctx)
+
+       addic   $d0,$h0,5               # compare to modulus
+       addze   $d1,$h1
+       addze   $d2,$h2
+       addze   $d3,$h3
+       addze   $mask,$h4
+
+       srwi    $mask,$mask,2           # did it carry/borrow?
+       neg     $mask,$mask
+
+       andc    $h0,$h0,$mask
+       and     $d0,$d0,$mask
+       andc    $h1,$h1,$mask
+       and     $d1,$d1,$mask
+       or      $h0,$h0,$d0
+       lwz     $d0,0($nonce)           # load nonce
+       andc    $h2,$h2,$mask
+       and     $d2,$d2,$mask
+       or      $h1,$h1,$d1
+       lwz     $d1,4($nonce)
+       andc    $h3,$h3,$mask
+       and     $d3,$d3,$mask
+       or      $h2,$h2,$d2
+       lwz     $d2,8($nonce)
+       or      $h3,$h3,$d3
+       lwz     $d3,12($nonce)
+
+       addc    $h0,$h0,$d0             # accumulate nonce
+       adde    $h1,$h1,$d1
+       adde    $h2,$h2,$d2
+       adde    $h3,$h3,$d3
+___
+$code.=<<___   if ($LITTLE_ENDIAN);
+       stw     $h0,0($mac)             # write result
+       stw     $h1,4($mac)
+       stw     $h2,8($mac)
+       stw     $h3,12($mac)
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       li      $d1,4
+       stwbrx  $h0,0,$mac              # write result
+       li      $d2,8
+       stwbrx  $h1,$d1,$mac
+       li      $d3,12
+       stwbrx  $h2,$d2,$mac
+       stwbrx  $h3,$d3,$mac
+___
+$code.=<<___;
+       $POP    r28,`$FRAME-$SIZE_T*4`($sp)
+       $POP    r29,`$FRAME-$SIZE_T*3`($sp)
+       $POP    r30,`$FRAME-$SIZE_T*2`($sp)
+       $POP    r31,`$FRAME-$SIZE_T*1`($sp)
+       addi    $sp,$sp,$FRAME
+       blr
+       .long   0
+       .byte   0,12,4,1,0x80,4,3,0
+.size  .poly1305_emit,.-.poly1305_emit
+___
+                                                       }
+$code.=<<___;
+.asciz "Poly1305 for PPC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/poly1305/asm/poly1305-ppcfp.pl b/crypto/poly1305/asm/poly1305-ppcfp.pl
new file mode 100755
index 0000000..061a556
--- /dev/null
@@ -0,0 +1,732 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements Poly1305 hash for PowerPC FPU.
+#
+# June 2015
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone,
+# and improvement coefficients relative to gcc-generated code.
+#
+# Freescale e300       9.78/+30%
+# PPC74x0              7.08/+50%
+# PPC970               6.24/+80%
+# POWER7               3.50/+30%
+# POWER8               3.75/+10%
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+       $SIZE_T =8;
+       $LRSAVE =2*$SIZE_T;
+       $UCMP   ="cmpld";
+       $STU    ="stdu";
+       $POP    ="ld";
+       $PUSH   ="std";
+} elsif ($flavour =~ /32/) {
+       $SIZE_T =4;
+       $LRSAVE =$SIZE_T;
+       $UCMP   ="cmplw";
+       $STU    ="stwu";
+       $POP    ="lwz";
+       $PUSH   ="stw";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? 4 : 0;
+
+$LWXLE = $LITTLE_ENDIAN ? "lwzx" : "lwbrx";
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
+
+$LOCALS=6*$SIZE_T;
+$FRAME=$LOCALS+6*8+18*8;
+
+my $sp="r1";
+
+my ($ctx,$inp,$len,$padbit) = map("r$_",(3..6));
+my ($in0,$in1,$in2,$in3,$i1,$i2,$i3) = map("r$_",(7..12,6));
+
+my ($h0lo,$h0hi,$h1lo,$h1hi,$h2lo,$h2hi,$h3lo,$h3hi,
+    $two0,$two32,$two64,$two96,$two130,$five_two130,
+    $r0lo,$r0hi,$r1lo,$r1hi,$r2lo,$r2hi,
+    $s2lo,$s2hi,$s3lo,$s3hi,
+    $c0lo,$c0hi,$c1lo,$c1hi,$c2lo,$c2hi,$c3lo,$c3hi) = map("f$_",(0..31));
+# borrowings
+my ($r3lo,$r3hi,$s1lo,$s1hi) = ($c0lo,$c0hi,$c1lo,$c1hi);
+my ($x0,$x1,$x2,$x3) = ($c2lo,$c2hi,$c3lo,$c3hi);
+my ($y0,$y1,$y2,$y3) = ($c3lo,$c3hi,$c1lo,$c1hi);
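+
+# The hash and key live in base-2^32 limbs, each kept as a hi+lo pair of
+# doubles so limb products stay exactly representable in the 53-bit
+# mantissa; the twoNN constants are the scaling/splitting powers of two,
+# and five_two130 implements the 2^130 = 5 (mod 2^130-5) folding.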
+
+$code.=<<___;
+.machine       "any"
+.text
+
+.globl .poly1305_init_fpu
+.align 6
+.poly1305_init_fpu:
+       $STU    $sp,-$LOCALS($sp)               # minimal frame
+       mflr    $padbit
+       $PUSH   $padbit,`$LOCALS+$LRSAVE`($sp)
+
+       bl      LPICmeup
+
+       xor     r0,r0,r0
+       mtlr    $padbit                         # restore lr
+
+       lfd     $two0,8*0($len)                 # load constants
+       lfd     $two32,8*1($len)
+       lfd     $two64,8*2($len)
+       lfd     $two96,8*3($len)
+       lfd     $two130,8*4($len)
+       lfd     $five_two130,8*5($len)
+
+       stfd    $two0,8*0($ctx)                 # initial hash value, biased 0
+       stfd    $two32,8*1($ctx)
+       stfd    $two64,8*2($ctx)
+       stfd    $two96,8*3($ctx)
+
+       $UCMP   $inp,r0
+       beq-    Lno_key
+
+       lfd     $h3lo,8*13($len)                # new fpscr
+       mffs    $h3hi                           # old fpscr
+
+       stfd    $two0,8*4($ctx)                 # key "template"
+       stfd    $two32,8*5($ctx)
+       stfd    $two64,8*6($ctx)
+       stfd    $two96,8*7($ctx)
+
+       li      $in1,4
+       li      $in2,8
+       li      $in3,12
+       $LWXLE  $in0,0,$inp                     # load key
+       $LWXLE  $in1,$in1,$inp
+       $LWXLE  $in2,$in2,$inp
+       $LWXLE  $in3,$in3,$inp
+
+       lis     $i1,0xf000                      #   0xf0000000
+       ori     $i2,$i1,3                       #   0xf0000003
+       andc    $in0,$in0,$i1                   # &=0x0fffffff
+       andc    $in1,$in1,$i2                   # &=0x0ffffffc
+       andc    $in2,$in2,$i2
+       andc    $in3,$in3,$i2
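+	# this is the standard Poly1305 "clamp": the top 4 bits of every
+	# key word and the low 2 bits of words 1-3 are cleared, which
+	# bounds the limb products in the main loop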
+
+       stw     $in0,`8*4+(4^$LITTLE_ENDIAN)`($ctx)     # fill "template"
+       stw     $in1,`8*5+(4^$LITTLE_ENDIAN)`($ctx)
+       stw     $in2,`8*6+(4^$LITTLE_ENDIAN)`($ctx)
+       stw     $in3,`8*7+(4^$LITTLE_ENDIAN)`($ctx)
+
+	mtfsf	255,$h3lo			# engage new fpscr
+       stfd    $two0,8*18($ctx)                # copy constants to context
+       stfd    $two32,8*19($ctx)
+       stfd    $two64,8*20($ctx)
+       stfd    $two96,8*21($ctx)
+       stfd    $two130,8*22($ctx)
+       stfd    $five_two130,8*23($ctx)
+
+       lfd     $h0lo,8*4($ctx)                 # load [biased] key
+       lfd     $h1lo,8*5($ctx)
+       lfd     $h2lo,8*6($ctx)
+       lfd     $h3lo,8*7($ctx)
+
+       fsub    $h0lo,$h0lo,$two0               # r0
+       fsub    $h1lo,$h1lo,$two32              # r1
+       fsub    $h2lo,$h2lo,$two64              # r2
+       fsub    $h3lo,$h3lo,$two96              # r3
+
+       lfd     $two0,8*6($len)                 # more constants
+       lfd     $two32,8*7($len)
+       lfd     $two64,8*8($len)
+       lfd     $two96,8*9($len)
+
+       fmul    $h1hi,$h1lo,$five_two130        # s1
+       fmul    $h2hi,$h2lo,$five_two130        # s2
+        stfd   $h3hi,8*15($ctx)                # borrow slot for original fpscr
+       fmul    $h3hi,$h3lo,$five_two130        # s3
+
+       fadd    $h0hi,$h0lo,$two0
+        stfd   $h1hi,8*12($ctx)                # put aside for now
+       fadd    $h1hi,$h1lo,$two32
+        stfd   $h2hi,8*13($ctx)
+       fadd    $h2hi,$h2lo,$two64
+        stfd   $h3hi,8*14($ctx)
+       fadd    $h3hi,$h3lo,$two96
+
+       fsub    $h0hi,$h0hi,$two0
+       fsub    $h1hi,$h1hi,$two32
+       fsub    $h2hi,$h2hi,$two64
+       fsub    $h3hi,$h3hi,$two96
+
+       lfd     $two0,8*10($len)                # more constants
+       lfd     $two32,8*11($len)
+       lfd     $two64,8*12($len)
+
+       fsub    $h0lo,$h0lo,$h0hi
+       fsub    $h1lo,$h1lo,$h1hi
+       fsub    $h2lo,$h2lo,$h2hi
+       fsub    $h3lo,$h3lo,$h3hi
+
+       stfd    $h0hi,8*5($ctx)                 # r0hi
+       stfd    $h1hi,8*7($ctx)                 # r1hi
+       stfd    $h2hi,8*9($ctx)                 # r2hi
+       stfd    $h3hi,8*11($ctx)                # r3hi
+
+       stfd    $h0lo,8*4($ctx)                 # r0lo
+       stfd    $h1lo,8*6($ctx)                 # r1lo
+       stfd    $h2lo,8*8($ctx)                 # r2lo
+       stfd    $h3lo,8*10($ctx)                # r3lo
+
+       lfd     $h1lo,8*12($ctx)                # s1
+       lfd     $h2lo,8*13($ctx)                # s2
+       lfd     $h3lo,8*14($ctx)                # s3
+       lfd     $h0lo,8*15($ctx)                # pull original fpscr
+
+       fadd    $h1hi,$h1lo,$two0
+       fadd    $h2hi,$h2lo,$two32
+       fadd    $h3hi,$h3lo,$two64
+
+       fsub    $h1hi,$h1hi,$two0
+       fsub    $h2hi,$h2hi,$two32
+       fsub    $h3hi,$h3hi,$two64
+
+       fsub    $h1lo,$h1lo,$h1hi
+       fsub    $h2lo,$h2lo,$h2hi
+       fsub    $h3lo,$h3lo,$h3hi
+
+       stfd    $h1hi,8*13($ctx)                # s1hi
+       stfd    $h2hi,8*15($ctx)                # s2hi
+       stfd    $h3hi,8*17($ctx)                # s3hi
+
+       stfd    $h1lo,8*12($ctx)                # s1lo
+       stfd    $h2lo,8*14($ctx)                # s2lo
+       stfd    $h3lo,8*16($ctx)                # s3lo
+
+       mtfsf   255,$h0lo                       # restore fpscr
+Lno_key:
+       xor     r3,r3,r3
+       addi    $sp,$sp,$LOCALS
+       blr
+       .long   0
+       .byte   0,12,4,1,0x80,0,2,0
+.size  .poly1305_init_fpu,.-.poly1305_init_fpu
+
+.globl .poly1305_blocks_fpu
+.align 4
+.poly1305_blocks_fpu:
+       srwi.   $len,$len,4
+       beq-    Labort
+
+       $STU    $sp,-$FRAME($sp)
+       mflr    r0
+       stfd    f14,`$FRAME-8*18`($sp)
+       stfd    f15,`$FRAME-8*17`($sp)
+       stfd    f16,`$FRAME-8*16`($sp)
+       stfd    f17,`$FRAME-8*15`($sp)
+       stfd    f18,`$FRAME-8*14`($sp)
+       stfd    f19,`$FRAME-8*13`($sp)
+       stfd    f20,`$FRAME-8*12`($sp)
+       stfd    f21,`$FRAME-8*11`($sp)
+       stfd    f22,`$FRAME-8*10`($sp)
+       stfd    f23,`$FRAME-8*9`($sp)
+       stfd    f24,`$FRAME-8*8`($sp)
+       stfd    f25,`$FRAME-8*7`($sp)
+       stfd    f26,`$FRAME-8*6`($sp)
+       stfd    f27,`$FRAME-8*5`($sp)
+       stfd    f28,`$FRAME-8*4`($sp)
+       stfd    f29,`$FRAME-8*3`($sp)
+       stfd    f30,`$FRAME-8*2`($sp)
+       stfd    f31,`$FRAME-8*1`($sp)
+       $PUSH   r0,`$FRAME+$LRSAVE`($sp)
+
+       xor     r0,r0,r0
+       li      $in3,1
+       mtctr   $len
+       neg     $len,$len
+       stw     r0,`$LOCALS+8*4+(0^$LITTLE_ENDIAN)`($sp)
+       stw     $in3,`$LOCALS+8*4+(4^$LITTLE_ENDIAN)`($sp)
+
+       lfd     $two0,8*18($ctx)                # load constants
+       lfd     $two32,8*19($ctx)
+       lfd     $two64,8*20($ctx)
+       lfd     $two96,8*21($ctx)
+       lfd     $two130,8*22($ctx)
+       lfd     $five_two130,8*23($ctx)
+
+       lfd     $h0lo,8*0($ctx)                 # load [biased] hash value
+       lfd     $h1lo,8*1($ctx)
+       lfd     $h2lo,8*2($ctx)
+       lfd     $h3lo,8*3($ctx)
+
+       stfd    $two0,`$LOCALS+8*0`($sp)        # input "template"
+       oris    $in3,$padbit,`(1023+52+96)<<4`
+       stfd    $two32,`$LOCALS+8*1`($sp)
+       stfd    $two64,`$LOCALS+8*2`($sp)
+       stw     $in3,`$LOCALS+8*3+(0^$LITTLE_ENDIAN)`($sp)
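+	# the oris above builds the upper word of 2^(52+96): the IEEE-754
+	# exponent field (1023+52+96) lands at bit 20 via oris' implicit
+	# <<16 plus the explicit <<4, while $padbit sets the mantissa bit
+	# worth 2^128, Poly1305's padding bit for full blocks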
+
+       li      $i1,4
+       li      $i2,8
+       li      $i3,12
+       $LWXLE  $in0,0,$inp                     # load input
+       $LWXLE  $in1,$i1,$inp
+       $LWXLE  $in2,$i2,$inp
+       $LWXLE  $in3,$i3,$inp
+       addi    $inp,$inp,16
+
+       stw     $in0,`$LOCALS+8*0+(4^$LITTLE_ENDIAN)`($sp)      # fill "template"
+       stw     $in1,`$LOCALS+8*1+(4^$LITTLE_ENDIAN)`($sp)
+       stw     $in2,`$LOCALS+8*2+(4^$LITTLE_ENDIAN)`($sp)
+       stw     $in3,`$LOCALS+8*3+(4^$LITTLE_ENDIAN)`($sp)
+
+       mffs    $x0                             # original fpscr
+       lfd     $x1,`$LOCALS+8*4`($sp)          # new fpscr
+       lfd     $r0lo,8*4($ctx)                 # load key
+       lfd     $r0hi,8*5($ctx)
+       lfd     $r1lo,8*6($ctx)
+       lfd     $r1hi,8*7($ctx)
+       lfd     $r2lo,8*8($ctx)
+       lfd     $r2hi,8*9($ctx)
+       lfd     $r3lo,8*10($ctx)
+       lfd     $r3hi,8*11($ctx)
+       lfd     $s1lo,8*12($ctx)
+       lfd     $s1hi,8*13($ctx)
+       lfd     $s2lo,8*14($ctx)
+       lfd     $s2hi,8*15($ctx)
+       lfd     $s3lo,8*16($ctx)
+       lfd     $s3hi,8*17($ctx)
+
+       stfd    $x0,`$LOCALS+8*4`($sp)          # save original fpscr
+       mtfsf   255,$x1
+
+       addic   $len,$len,1
+       addze   r0,r0
+       slwi.   r0,r0,4
+       sub     $inp,$inp,r0                    # conditional rewind
+
+       lfd     $x0,`$LOCALS+8*0`($sp)
+       lfd     $x1,`$LOCALS+8*1`($sp)
+       lfd     $x2,`$LOCALS+8*2`($sp)
+       lfd     $x3,`$LOCALS+8*3`($sp)
+
+       fsub    $h0lo,$h0lo,$two0               # de-bias hash value
+        $LWXLE $in0,0,$inp                     # modulo-scheduled input load
+       fsub    $h1lo,$h1lo,$two32
+        $LWXLE $in1,$i1,$inp
+       fsub    $h2lo,$h2lo,$two64
+        $LWXLE $in2,$i2,$inp
+       fsub    $h3lo,$h3lo,$two96
+        $LWXLE $in3,$i3,$inp
+
+       fsub    $x0,$x0,$two0                   # de-bias input
+        addi   $inp,$inp,16
+       fsub    $x1,$x1,$two32
+       fsub    $x2,$x2,$two64
+       fsub    $x3,$x3,$two96
+
+       fadd    $x0,$x0,$h0lo                   # accumulate input
+        stw    $in0,`$LOCALS+8*0+(4^$LITTLE_ENDIAN)`($sp)
+       fadd    $x1,$x1,$h1lo
+        stw    $in1,`$LOCALS+8*1+(4^$LITTLE_ENDIAN)`($sp)
+       fadd    $x2,$x2,$h2lo
+        stw    $in2,`$LOCALS+8*2+(4^$LITTLE_ENDIAN)`($sp)
+       fadd    $x3,$x3,$h3lo
+        stw    $in3,`$LOCALS+8*3+(4^$LITTLE_ENDIAN)`($sp)
+
+       b       Lentry
+
+.align 4
+Loop:
+       fsub    $y0,$y0,$two0                   # de-bias input
+        addic  $len,$len,1
+       fsub    $y1,$y1,$two32
+        addze  r0,r0
+       fsub    $y2,$y2,$two64
+        slwi.  r0,r0,4
+       fsub    $y3,$y3,$two96
+        sub    $inp,$inp,r0                    # conditional rewind
+
+       fadd    $h0lo,$h0lo,$y0                 # accumulate input
+       fadd    $h0hi,$h0hi,$y1
+       fadd    $h2lo,$h2lo,$y2
+       fadd    $h2hi,$h2hi,$y3
+
+       ######################################### base 2^48 -> base 2^32
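+	# each limb pair is rounded at the next limb's boundary by the
+	# biased add/sub sequence below; whatever spills past 2^130 is
+	# folded back into limb 0 as c3*(5/2^130), since 2^130 = 5
+	# modulo 2^130-5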
+       fadd    $c1lo,$h1lo,$two64
+        $LWXLE $in0,0,$inp                     # modulo-scheduled input load
+       fadd    $c1hi,$h1hi,$two64
+        $LWXLE $in1,$i1,$inp
+       fadd    $c3lo,$h3lo,$two130
+        $LWXLE $in2,$i2,$inp
+       fadd    $c3hi,$h3hi,$two130
+        $LWXLE $in3,$i3,$inp
+       fadd    $c0lo,$h0lo,$two32
+        addi   $inp,$inp,16
+       fadd    $c0hi,$h0hi,$two32
+       fadd    $c2lo,$h2lo,$two96
+       fadd    $c2hi,$h2hi,$two96
+
+       fsub    $c1lo,$c1lo,$two64
+        stw    $in0,`$LOCALS+8*0+(4^$LITTLE_ENDIAN)`($sp)      # fill "template"
+       fsub    $c1hi,$c1hi,$two64
+        stw    $in1,`$LOCALS+8*1+(4^$LITTLE_ENDIAN)`($sp)
+       fsub    $c3lo,$c3lo,$two130
+        stw    $in2,`$LOCALS+8*2+(4^$LITTLE_ENDIAN)`($sp)
+       fsub    $c3hi,$c3hi,$two130
+        stw    $in3,`$LOCALS+8*3+(4^$LITTLE_ENDIAN)`($sp)
+       fsub    $c0lo,$c0lo,$two32
+       fsub    $c0hi,$c0hi,$two32
+       fsub    $c2lo,$c2lo,$two96
+       fsub    $c2hi,$c2hi,$two96
+
+       fsub    $h1lo,$h1lo,$c1lo
+       fsub    $h1hi,$h1hi,$c1hi
+       fsub    $h3lo,$h3lo,$c3lo
+       fsub    $h3hi,$h3hi,$c3hi
+       fsub    $h2lo,$h2lo,$c2lo
+       fsub    $h2hi,$h2hi,$c2hi
+       fsub    $h0lo,$h0lo,$c0lo
+       fsub    $h0hi,$h0hi,$c0hi
+
+       fadd    $h1lo,$h1lo,$c0lo
+       fadd    $h1hi,$h1hi,$c0hi
+       fadd    $h3lo,$h3lo,$c2lo
+       fadd    $h3hi,$h3hi,$c2hi
+       fadd    $h2lo,$h2lo,$c1lo
+       fadd    $h2hi,$h2hi,$c1hi
+       fmadd   $h0lo,$c3lo,$five_two130,$h0lo
+       fmadd   $h0hi,$c3hi,$five_two130,$h0hi
+
+       fadd    $x1,$h1lo,$h1hi
+        lfd    $s1lo,8*12($ctx)                # reload constants
+       fadd    $x3,$h3lo,$h3hi
+        lfd    $s1hi,8*13($ctx)
+       fadd    $x2,$h2lo,$h2hi
+        lfd    $r3lo,8*10($ctx)
+       fadd    $x0,$h0lo,$h0hi
+        lfd    $r3hi,8*11($ctx)
+Lentry:
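+	# h = x*r as 4x4 limb products; terms that would land at or above
+	# 2^130 are taken with s = r*5/2^130 in place of r, wrapping them
+	# back into the low positions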
+       fmul    $h0lo,$s3lo,$x1
+       fmul    $h0hi,$s3hi,$x1
+       fmul    $h2lo,$r1lo,$x1
+       fmul    $h2hi,$r1hi,$x1
+       fmul    $h1lo,$r0lo,$x1
+       fmul    $h1hi,$r0hi,$x1
+       fmul    $h3lo,$r2lo,$x1
+       fmul    $h3hi,$r2hi,$x1
+
+       fmadd   $h0lo,$s1lo,$x3,$h0lo
+       fmadd   $h0hi,$s1hi,$x3,$h0hi
+       fmadd   $h2lo,$s3lo,$x3,$h2lo
+       fmadd   $h2hi,$s3hi,$x3,$h2hi
+       fmadd   $h1lo,$s2lo,$x3,$h1lo
+       fmadd   $h1hi,$s2hi,$x3,$h1hi
+       fmadd   $h3lo,$r0lo,$x3,$h3lo
+       fmadd   $h3hi,$r0hi,$x3,$h3hi
+
+       fmadd   $h0lo,$s2lo,$x2,$h0lo
+       fmadd   $h0hi,$s2hi,$x2,$h0hi
+       fmadd   $h2lo,$r0lo,$x2,$h2lo
+       fmadd   $h2hi,$r0hi,$x2,$h2hi
+       fmadd   $h1lo,$s3lo,$x2,$h1lo
+       fmadd   $h1hi,$s3hi,$x2,$h1hi
+       fmadd   $h3lo,$r1lo,$x2,$h3lo
+       fmadd   $h3hi,$r1hi,$x2,$h3hi
+
+       fmadd   $h0lo,$r0lo,$x0,$h0lo
+        lfd    $y0,`$LOCALS+8*0`($sp)          # load [biased] input
+       fmadd   $h0hi,$r0hi,$x0,$h0hi
+        lfd    $y1,`$LOCALS+8*1`($sp)
+       fmadd   $h2lo,$r2lo,$x0,$h2lo
+        lfd    $y2,`$LOCALS+8*2`($sp)
+       fmadd   $h2hi,$r2hi,$x0,$h2hi
+        lfd    $y3,`$LOCALS+8*3`($sp)
+       fmadd   $h1lo,$r1lo,$x0,$h1lo
+       fmadd   $h1hi,$r1hi,$x0,$h1hi
+       fmadd   $h3lo,$r3lo,$x0,$h3lo
+       fmadd   $h3hi,$r3hi,$x0,$h3hi
+
+       bdnz    Loop
+
+       ######################################### base 2^48 -> base 2^32
+       fadd    $c0lo,$h0lo,$two32
+       fadd    $c0hi,$h0hi,$two32
+       fadd    $c2lo,$h2lo,$two96
+       fadd    $c2hi,$h2hi,$two96
+       fadd    $c1lo,$h1lo,$two64
+       fadd    $c1hi,$h1hi,$two64
+       fadd    $c3lo,$h3lo,$two130
+       fadd    $c3hi,$h3hi,$two130
+
+       fsub    $c0lo,$c0lo,$two32
+       fsub    $c0hi,$c0hi,$two32
+       fsub    $c2lo,$c2lo,$two96
+       fsub    $c2hi,$c2hi,$two96
+       fsub    $c1lo,$c1lo,$two64
+       fsub    $c1hi,$c1hi,$two64
+       fsub    $c3lo,$c3lo,$two130
+       fsub    $c3hi,$c3hi,$two130
+
+       fsub    $h1lo,$h1lo,$c1lo
+       fsub    $h1hi,$h1hi,$c1hi
+       fsub    $h3lo,$h3lo,$c3lo
+       fsub    $h3hi,$h3hi,$c3hi
+       fsub    $h2lo,$h2lo,$c2lo
+       fsub    $h2hi,$h2hi,$c2hi
+       fsub    $h0lo,$h0lo,$c0lo
+       fsub    $h0hi,$h0hi,$c0hi
+
+       fadd    $h1lo,$h1lo,$c0lo
+       fadd    $h1hi,$h1hi,$c0hi
+       fadd    $h3lo,$h3lo,$c2lo
+       fadd    $h3hi,$h3hi,$c2hi
+       fadd    $h2lo,$h2lo,$c1lo
+       fadd    $h2hi,$h2hi,$c1hi
+       fmadd   $h0lo,$c3lo,$five_two130,$h0lo
+       fmadd   $h0hi,$c3hi,$five_two130,$h0hi
+
+       fadd    $x1,$h1lo,$h1hi
+       fadd    $x3,$h3lo,$h3hi
+       fadd    $x2,$h2lo,$h2hi
+       fadd    $x0,$h0lo,$h0hi
+
+       lfd     $h0lo,`$LOCALS+8*4`($sp)        # pull saved fpscr
+       fadd    $x1,$x1,$two32                  # bias
+       fadd    $x3,$x3,$two96
+       fadd    $x2,$x2,$two64
+       fadd    $x0,$x0,$two0
+
+       stfd    $x1,8*1($ctx)                   # store [biased] hash value
+       stfd    $x3,8*3($ctx)
+       stfd    $x2,8*2($ctx)
+       stfd    $x0,8*0($ctx)
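+	# the bias pins each double's exponent, so that poly1305_emit_fpu
+	# can pick the 32-bit integer limbs straight out of the mantissa
+	# words and merely mask off the exponent bits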
+
+       mtfsf   255,$h0lo                       # restore original fpscr
+       lfd     f14,`$FRAME-8*18`($sp)
+       lfd     f15,`$FRAME-8*17`($sp)
+       lfd     f16,`$FRAME-8*16`($sp)
+       lfd     f17,`$FRAME-8*15`($sp)
+       lfd     f18,`$FRAME-8*14`($sp)
+       lfd     f19,`$FRAME-8*13`($sp)
+       lfd     f20,`$FRAME-8*12`($sp)
+       lfd     f21,`$FRAME-8*11`($sp)
+       lfd     f22,`$FRAME-8*10`($sp)
+       lfd     f23,`$FRAME-8*9`($sp)
+       lfd     f24,`$FRAME-8*8`($sp)
+       lfd     f25,`$FRAME-8*7`($sp)
+       lfd     f26,`$FRAME-8*6`($sp)
+       lfd     f27,`$FRAME-8*5`($sp)
+       lfd     f28,`$FRAME-8*4`($sp)
+       lfd     f29,`$FRAME-8*3`($sp)
+       lfd     f30,`$FRAME-8*2`($sp)
+       lfd     f31,`$FRAME-8*1`($sp)
+       addi    $sp,$sp,$FRAME
+Labort:
+       blr
+       .long   0
+       .byte   0,12,4,1,0x80,0,4,0
+.size  .poly1305_blocks_fpu,.-.poly1305_blocks_fpu
+___
+{
+my ($mac,$nonce)=($inp,$len);
+
+my ($h0,$h1,$h2,$h3,$h4, $d0,$d1,$d2,$d3
+   ) = map("r$_",(7..11,28..31));
+my $mask = "r0";
+my $FRAME = (6+4)*$SIZE_T;
+
+$code.=<<___;
+.globl .poly1305_emit_fpu
+.align 4
+.poly1305_emit_fpu:
+       $STU    $sp,-$FRAME($sp)
+       mflr    r0
+       $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
+       $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
+       $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
+       $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
+       $PUSH   r0,`$FRAME+$LRSAVE`($sp)
+
+       lwz     $d0,`8*0+(0^$LITTLE_ENDIAN)`($ctx)      # load hash
+       lwz     $h0,`8*0+(4^$LITTLE_ENDIAN)`($ctx)
+       lwz     $d1,`8*1+(0^$LITTLE_ENDIAN)`($ctx)
+       lwz     $h1,`8*1+(4^$LITTLE_ENDIAN)`($ctx)
+       lwz     $d2,`8*2+(0^$LITTLE_ENDIAN)`($ctx)
+       lwz     $h2,`8*2+(4^$LITTLE_ENDIAN)`($ctx)
+       lwz     $d3,`8*3+(0^$LITTLE_ENDIAN)`($ctx)
+       lwz     $h3,`8*3+(4^$LITTLE_ENDIAN)`($ctx)
+
+       lis     $mask,0xfff0
+       andc    $d0,$d0,$mask                   # mask exponent
+       andc    $d1,$d1,$mask
+       andc    $d2,$d2,$mask
+       andc    $d3,$d3,$mask                   # can be partially reduced...
+       li      $mask,3
+
+       srwi    $padbit,$d3,2                   # ... so reduce
+       and     $h4,$d3,$mask
+       andc    $d3,$d3,$mask
+       add     $d3,$d3,$padbit
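+	# d3 now holds 5*t for t = d3>>2, the multiple of 2^130;
+	# 2^130 = 5 modulo 2^130-5, so adding 5*t completes the reduction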
+___
+                                               if ($SIZE_T==4) {
+$code.=<<___;
+       addc    $h0,$h0,$d3
+       adde    $h1,$h1,$d0
+       adde    $h2,$h2,$d1
+       adde    $h3,$h3,$d2
+       addze   $h4,$h4
+
+       addic   $d0,$h0,5                       # compare to modulus
+       addze   $d1,$h1
+       addze   $d2,$h2
+       addze   $d3,$h3
+       addze   $mask,$h4
+
+       srwi    $mask,$mask,2                   # did it carry/borrow?
+       neg     $mask,$mask
+       srawi   $mask,$mask,31                  # mask
+
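+	# branchless select: $mask is all-ones iff h+5 carried out of
+	# 2^130 (i.e. h >= p), in which case h+5-2^130 = h-p is taken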
+       andc    $h0,$h0,$mask
+       and     $d0,$d0,$mask
+       andc    $h1,$h1,$mask
+       and     $d1,$d1,$mask
+       or      $h0,$h0,$d0
+       lwz     $d0,0($nonce)                   # load nonce
+       andc    $h2,$h2,$mask
+       and     $d2,$d2,$mask
+       or      $h1,$h1,$d1
+       lwz     $d1,4($nonce)
+       andc    $h3,$h3,$mask
+       and     $d3,$d3,$mask
+       or      $h2,$h2,$d2
+       lwz     $d2,8($nonce)
+       or      $h3,$h3,$d3
+       lwz     $d3,12($nonce)
+
+       addc    $h0,$h0,$d0                     # accumulate nonce
+       adde    $h1,$h1,$d1
+       adde    $h2,$h2,$d2
+       adde    $h3,$h3,$d3
+___
+                                               } else {
+$code.=<<___;
+       add     $h0,$h0,$d3
+       add     $h1,$h1,$d0
+       add     $h2,$h2,$d1
+       add     $h3,$h3,$d2
+
+       srdi    $d0,$h0,32
+       add     $h1,$h1,$d0
+       srdi    $d1,$h1,32
+       add     $h2,$h2,$d1
+       srdi    $d2,$h2,32
+       add     $h3,$h3,$d2
+       srdi    $d3,$h3,32
+       add     $h4,$h4,$d3
+
+       insrdi  $h0,$h1,32,0
+       insrdi  $h2,$h3,32,0
+
+       addic   $d0,$h0,5                       # compare to modulus
+       addze   $d1,$h2
+       addze   $d2,$h4
+
+       srdi    $mask,$d2,2                     # did it carry/borrow?
+       neg     $mask,$mask
+       sradi   $mask,$mask,63                  # mask
+       ld      $d2,0($nonce)                   # load nonce
+       ld      $d3,8($nonce)
+
+       andc    $h0,$h0,$mask
+       and     $d0,$d0,$mask
+       andc    $h2,$h2,$mask
+       and     $d1,$d1,$mask
+       or      $h0,$h0,$d0
+       or      $h2,$h2,$d1
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       rotldi  $d2,$d2,32                      # flip nonce words
+       rotldi  $d3,$d3,32
+___
+$code.=<<___;
+       addc    $h0,$h0,$d2                     # accumulate nonce
+       adde    $h2,$h2,$d3
+
+       srdi    $h1,$h0,32
+       srdi    $h3,$h2,32
+___
+                                               }
+$code.=<<___   if ($LITTLE_ENDIAN);
+       stw     $h0,0($mac)                     # write result
+       stw     $h1,4($mac)
+       stw     $h2,8($mac)
+       stw     $h3,12($mac)
+___
+$code.=<<___   if (!$LITTLE_ENDIAN);
+       li      $d1,4
+       stwbrx  $h0,0,$mac                      # write result
+       li      $d2,8
+       stwbrx  $h1,$d1,$mac
+       li      $d3,12
+       stwbrx  $h2,$d2,$mac
+       stwbrx  $h3,$d3,$mac
+___
+$code.=<<___;
+       $POP    r28,`$FRAME-$SIZE_T*4`($sp)
+       $POP    r29,`$FRAME-$SIZE_T*3`($sp)
+       $POP    r30,`$FRAME-$SIZE_T*2`($sp)
+       $POP    r31,`$FRAME-$SIZE_T*1`($sp)
+       addi    $sp,$sp,$FRAME
+       blr
+       .long   0
+       .byte   0,12,4,1,0x80,4,3,0
+.size  .poly1305_emit_fpu,.-.poly1305_emit_fpu
+___
+}
+# Ugly hack here, because PPC assembler syntax seems to vary too
+# much from platform to platform...
+$code.=<<___;
+.align 6
+LPICmeup:
+       mflr    r0
+       bcl     20,31,\$+4
+       mflr    $len    # vvvvvv "distance" between . and 1st data entry
+       addi    $len,$len,`64-8`        # borrow $len
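+	# bcl above set lr to the address of the preceding mflr (offset 8
+	# into the function); the constant table begins at offset 64,
+	# hence the 64-8 displacement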
+       mtlr    r0
+       blr
+       .long   0
+       .byte   0,12,0x14,0,0,0,0,0
+       .space  `64-9*4`
+
+.quad  0x4330000000000000              # 2^(52+0)
+.quad  0x4530000000000000              # 2^(52+32)
+.quad  0x4730000000000000              # 2^(52+64)
+.quad  0x4930000000000000              # 2^(52+96)
+.quad  0x4b50000000000000              # 2^(52+130)
+
+.quad  0x37f4000000000000              # 5/2^130
+
+.quad  0x4430000000000000              # 2^(52+16+0)
+.quad  0x4630000000000000              # 2^(52+16+32)
+.quad  0x4830000000000000              # 2^(52+16+64)
+.quad  0x4a30000000000000              # 2^(52+16+96)
+.quad  0x3e30000000000000              # 2^(52+16+0-96)
+.quad  0x4030000000000000              # 2^(52+16+32-96)
+.quad  0x4230000000000000              # 2^(52+16+64-96)
+
+.quad  0x0000000000000001              # fpscr: truncate, no exceptions
+.asciz "Poly1305 for PPC FPU, CRYPTOGAMS by <appro\@openssl.org>"
+.align 4
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
index 0b59b9f..07a63d9 100644 (file)
@@ -9,6 +9,10 @@ BEGINRAW[Makefile(unix)]
        $(PERL) {- $sourcedir -}/asm/poly1305-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
 {- $builddir -}/poly1305-x86_64.s:     {- $sourcedir -}/asm/poly1305-x86_64.pl
        $(PERL) {- $sourcedir -}/asm/poly1305-x86_64.pl $(PERLASM_SCHEME) > $@
+{- $builddir -}/poly1305-ppc.s:                {- $sourcedir -}/asm/poly1305-ppc.pl
+       $(PERL) {- $sourcedir -}/asm/poly1305-ppc.pl $(PERLASM_SCHEME) $@
+{- $builddir -}/poly1305-ppcfp.s:      {- $sourcedir -}/asm/poly1305-ppcfp.pl
+       $(PERL) {- $sourcedir -}/asm/poly1305-ppcfp.pl $(PERLASM_SCHEME) $@
 
 {- $builddir -}/poly1305-%.S:  {- $sourcedir -}/asm/poly1305-%.pl
        $(PERL) $< $(PERLASM_SCHEME) $@