#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# I let hardware handle unaligned input, except on page boundaries
# (see below for details). Otherwise it's a straightforward
# implementation with the X vector in the register bank. The module is
# big-endian [which is no big deal, as there are no little-endian
# targets left around].

#			sha256		|	sha512
#			-m64	-m32	|	-m64	-m32
# --------------------------------------+-----------------------
# PPC970,gcc-4.0.0	+50%	+38%	|	+40%	+410%(*)
# Power6,xlc-7		+150%	+90%	|	+100%	+430%(*)
#
# (*)	64-bit code in 32-bit application context, which actually is
#	on the TODO list. It should be noted that for safe deployment
#	in a 32-bit *multi-threaded* context asynchronous signals
#	should be blocked upon entry to the SHA512 block routine. This
#	is because the 32-bit signaling procedure invalidates upper
#	halves of GPRs. The context switch procedure preserves them,
#	but signaling does not:-(
# The second version is truly multi-thread safe. The trouble with the
# original version was that it used the thread-local-storage pointer
# register. It scrupulously preserved that register, but a problem
# would arise the moment an asynchronous signal was delivered and the
# signal handler dereferenced the TLS pointer. While that never
# happens in the OpenSSL application or test suite, we have to respect
# the scenario and not use the TLS pointer register. An alternative
# would be to require the caller to block signals prior to calling
# this routine. For the record, in 32-bit context R2 serves as the TLS
# pointer, while in 64-bit context R13 does.

$flavour=shift;
$output =shift;

if ($flavour =~ /64/) {
	$SIZE_T=8;
	$LRSAVE=2*$SIZE_T;
	$STU="stdu";
	$UCMP="cmpld";
	$SHL="sldi";
	$POP="ld";
	$PUSH="std";
} elsif ($flavour =~ /32/) {
	$SIZE_T=4;
	$LRSAVE=$SIZE_T;
	$STU="stwu";
	$UCMP="cmplw";
	$SHL="slwi";
	$POP="lwz";
	$PUSH="stw";
} else { die "nonsense $flavour"; }

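# Usage sketch (illustrative, not part of the original commentary): the
# script takes a flavour and an output file name, and the output name
# selects between SHA512 and SHA256 code (see the /512/ test below),
# e.g.
#
#	perl sha512-ppc.pl linux64 sha512-ppc.s		# 64-bit ABI, SHA512
#	perl sha512-ppc.pl linux32 sha256-ppc.s		# 32-bit ABI, SHA256
#
# The exact flavour strings are whatever ppc-xlate.pl accepts;
# "linux64" and "linux32" here are assumptions for illustration.
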
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";

if ($output =~ /512/) {
	$func="sha512_block_data_order";
	$SZ=8;
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1,  8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
	$LD="ld";
	$ST="std";
	$ROR="rotrdi";
	$SHR="srdi";
} else {
	$func="sha256_block_data_order";
	$SZ=4;
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
	$LD="lwz";
	$ST="stw";
	$ROR="rotrwi";
	$SHR="srwi";
}

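# For reference, the semantics the constants above encode (a minimal
# sketch, not used by the generator; helper names are illustrative):
# each Sigma is an XOR of three right-rotates, each sigma an XOR of
# two right-rotates and a logical right shift. Assumes a 64-bit perl
# when $w==64.
sub _rotr	{ my ($x,$n,$w)=@_;	# rotate $x right by $n within $w bits
		  my $m = $w==64 ? ~0 : (1<<$w)-1;
		  (($x>>$n) | ($x<<($w-$n))) & $m; }
sub _Sigma	{ my ($x,$w,@r)=@_; _rotr($x,$r[0],$w)^_rotr($x,$r[1],$w)^_rotr($x,$r[2],$w) }
sub _sigma	{ my ($x,$w,@r)=@_; _rotr($x,$r[0],$w)^_rotr($x,$r[1],$w)^($x>>$r[2]) }
# e.g. Sigma1(e) in SHA512 corresponds to _Sigma($e,64,@Sigma1).
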
$FRAME=32*$SIZE_T+16*$SZ;
$LOCALS=6*$SIZE_T;

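# Stack frame layout, as derived from the prologue below (offsets from
# $sp; note $FRAME-26*SIZE_T == $LOCALS+16*SZ, so the areas are
# contiguous):
#
#	0 .. $LOCALS-1			linkage area
#	$LOCALS .. $LOCALS+16*SZ-1	aligned copy of one input block,
#					used for page-crossing input
#	$FRAME-26*SIZE_T .. $FRAME-1	saved inp/num/end/ctx pointers
#					and non-volatile r14-r31
#	$FRAME+$LRSAVE			LR save slot in the caller's frame
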
$sp ="r1";
$toc="r2";
$ctx="r3";	# zapped by $a0
$inp="r4";	# zapped by $a1
$num="r5";	# zapped by $t0

$T  ="r0";
$a0 ="r3";
$a1 ="r4";
$t0 ="r5";
$t1 ="r6";
$Tbl="r7";

$A  ="r8";
$B  ="r9";
$C  ="r10";
$D  ="r11";
$E  ="r12";
$F  =$t1;	$t1 = "r0";	# stay away from "r13";
$G  ="r14";
$H  ="r15";

@V=($A,$B,$C,$D,$E,$F,$G,$H);
@X=("r16","r17","r18","r19","r20","r21","r22","r23",
    "r24","r25","r26","r27","r28","r29","r30","r31");

$inp="r31" if($SZ==4 || $SIZE_T==8);	# reassigned $inp! aliases with @X[15]

sub ROUND_00_15 {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
$code.=<<___;
	$ROR	$a0,$e,$Sigma1[0]
	$ROR	$a1,$e,$Sigma1[1]
	and	$t0,$f,$e
	xor	$a0,$a0,$a1
	add	$h,$h,$t1
	andc	$t1,$g,$e
	$ROR	$a1,$a1,`$Sigma1[2]-$Sigma1[1]`
	or	$t0,$t0,$t1		; Ch(e,f,g)
	add	$h,$h,@X[$i%16]
	xor	$a0,$a0,$a1		; Sigma1(e)
	add	$h,$h,$t0
	add	$h,$h,$a0

	$ROR	$a0,$a,$Sigma0[0]
	$ROR	$a1,$a,$Sigma0[1]
	and	$t0,$a,$b
	and	$t1,$a,$c
	xor	$a0,$a0,$a1
	$ROR	$a1,$a1,`$Sigma0[2]-$Sigma0[1]`
	xor	$t0,$t0,$t1
	and	$t1,$b,$c
	xor	$a0,$a0,$a1		; Sigma0(a)
	add	$d,$d,$h
	xor	$t0,$t0,$t1		; Maj(a,b,c)
___
$code.=<<___ if ($i<15);
	$LD	$t1,`($i+1)*$SZ`($Tbl)
___
$code.=<<___;
	add	$h,$h,$a0
	add	$h,$h,$t0

___
}

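# For reference (not from the original commentary): the boolean
# functions computed above are
#
#	Ch(e,f,g)  = (f & e) | (g & ~e)
#	Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c)
#
# folded into h together with Sigma1(e), Sigma0(a) and the round
# constant, which is prefetched into $t1 one round in advance.
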
sub ROUND_16_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
$i-=16;
$code.=<<___;
	$ROR	$a0,@X[($i+1)%16],$sigma0[0]
	$ROR	$a1,@X[($i+1)%16],$sigma0[1]
	$ROR	$t0,@X[($i+14)%16],$sigma1[0]
	$ROR	$t1,@X[($i+14)%16],$sigma1[1]
	xor	$a0,$a0,$a1
	$SHR	$a1,@X[($i+1)%16],$sigma0[2]
	xor	$t0,$t0,$t1
	$SHR	$t1,@X[($i+14)%16],$sigma1[2]
	add	@X[$i],@X[$i],@X[($i+9)%16]
	xor	$a0,$a0,$a1		; sigma0(X[(i+1)&0x0f])
	xor	$t0,$t0,$t1		; sigma1(X[(i+14)&0x0f])
	$LD	$t1,`$i*$SZ`($Tbl)
	add	@X[$i],@X[$i],$a0
	add	@X[$i],@X[$i],$t0
___
&ROUND_00_15($i+16,$a,$b,$c,$d,$e,$f,$g,$h);
}

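# The schedule update above, as plain code (a sketch in terms of the
# illustrative _sigma helper defined earlier; truncation to 8*$SZ bits
# is implicit in the register width):
#
#	$X[$i] = ($X[$i] + $X[($i+9)%16]
#		  + _sigma($X[($i+1)%16], 8*$SZ, @sigma0)
#		  + _sigma($X[($i+14)%16],8*$SZ, @sigma1)) % 2**(8*$SZ);
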
$code=<<___;
.machine	"any"
.text

.globl	$func
.align	6
$func:
	$STU	$sp,-$FRAME($sp)
	mflr	r0
	$SHL	$num,$num,`log(16*$SZ)/log(2)`

	$PUSH	$ctx,`$FRAME-$SIZE_T*22`($sp)

	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
___

if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
	$LD	$A,`0*$SZ`($ctx)
	mr	$inp,r4				; incarnate $inp
	$LD	$B,`1*$SZ`($ctx)
	$LD	$C,`2*$SZ`($ctx)
	$LD	$D,`3*$SZ`($ctx)
	$LD	$E,`4*$SZ`($ctx)
	$LD	$F,`5*$SZ`($ctx)
	$LD	$G,`6*$SZ`($ctx)
	$LD	$H,`7*$SZ`($ctx)
___
} else {
  for ($i=16;$i<32;$i++) {
    $code.=<<___;
	lwz	r$i,`4*($i-16)`($ctx)
___
  }
}

$code.=<<___;
	bl	LPICmeup
LPICedup:
	andi.	r0,$inp,3
	bne	Lunaligned
Laligned:
	add	$num,$inp,$num
	$PUSH	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	b	Ldone

; The PowerPC specification allows an implementation to be ill-behaved
; upon an unaligned access that crosses a page boundary. The "better
; safe than sorry" principle makes me treat it specially. But I don't
; look for the particular offending word; rather, I look for the input
; block that crosses the boundary. Once found, that block is aligned
; and hashed separately...
.align	4
Lunaligned:
	subfic	$t1,$inp,4096
	andi.	$t1,$t1,`4096-16*$SZ`	; distance to closest page boundary
	beq	Lcross_page
	$UCMP	$num,$t1
	ble-	Laligned		; didn't cross the page boundary
	subfc	$num,$t1,$num
	add	$t1,$inp,$t1
	$PUSH	$num,`$FRAME-$SIZE_T*25`($sp)	; save real remaining num
	$PUSH	$t1,`$FRAME-$SIZE_T*24`($sp)	; intermediate end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	; $inp equals the intermediate end pointer here
	$POP	$num,`$FRAME-$SIZE_T*25`($sp)	; restore real remaining num
Lcross_page:
	li	$t1,`16*$SZ/4`
	mtctr	$t1
___
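
# In plain terms, Lunaligned above does the following (an illustrative
# sketch, assuming a 4KB page; "gap" is the number of whole blocks'
# worth of bytes left before the next page boundary):
#
#	gap = (4096 - inp) & (4096 - 16*SZ);	# subfic + andi. above
#	if (gap == 0)	goto cross_page;	# next block straddles it
#	if (num <= gap)	goto aligned;		# no block crosses
#	hash gap bytes in place, then fall into cross_page, which
#	copies one block to the aligned buffer at $LOCALS, hashes it
#	there, and loops with num reduced by one block.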
if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
	addi	r20,$sp,$LOCALS			; aligned spot below the frame
Lmemcpy:
	lbz	r16,0($inp)
	lbz	r17,1($inp)
	lbz	r18,2($inp)
	lbz	r19,3($inp)
	addi	$inp,$inp,4
	stb	r16,0(r20)
	stb	r17,1(r20)
	stb	r18,2(r20)
	stb	r19,3(r20)
	addi	r20,r20,4
	bdnz	Lmemcpy
___
} else {
$code.=<<___;
	addi	r12,$sp,$LOCALS			; aligned spot below the frame
Lmemcpy:
	lbz	r8,0($inp)
	lbz	r9,1($inp)
	lbz	r10,2($inp)
	lbz	r11,3($inp)
	addi	$inp,$inp,4
	stb	r8,0(r12)
	stb	r9,1(r12)
	stb	r10,2(r12)
	stb	r11,3(r12)
	addi	r12,r12,4
	bdnz	Lmemcpy
___
}

$code.=<<___;
	$PUSH	$inp,`$FRAME-$SIZE_T*26`($sp)	; save real inp
	addi	$t1,$sp,`$LOCALS+16*$SZ`	; fictitious end pointer
	addi	$inp,$sp,$LOCALS		; fictitious inp pointer
	$PUSH	$num,`$FRAME-$SIZE_T*25`($sp)	; save real num
	$PUSH	$t1,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	$POP	$inp,`$FRAME-$SIZE_T*26`($sp)	; restore real inp
	$POP	$num,`$FRAME-$SIZE_T*25`($sp)	; restore real num
	addic.	$num,$num,`-16*$SZ`		; num--
	bne-	Lunaligned

Ldone:
	$POP	r0,`$FRAME+$LRSAVE`($sp)
	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,4,1,0x80,18,3,0
	.long	0
___

if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
.align	4
Lsha2_block_private:
	$LD	$t1,0($Tbl)
___
for($i=0;$i<16;$i++) {
$code.=<<___ if ($SZ==4);
	lwz	@X[$i],`$i*$SZ`($inp)
___
# 64-bit loads are split into 2x32-bit ones, as the CPU can't handle
# unaligned 64-bit loads, only 32-bit ones...
$code.=<<___ if ($SZ==8);
	lwz	$t0,`$i*$SZ`($inp)
	lwz	@X[$i],`$i*$SZ+4`($inp)
	insrdi	@X[$i],$t0,32,0
___
	&ROUND_00_15($i,@V);
	unshift(@V,pop(@V));
}
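# insrdi above deposits the low 32 bits of $t0 into the most
# significant half of @X[$i], i.e.
# @X[$i] = ($t0<<32)|(@X[$i]&0xffffffff), assembling one big-endian
# 64-bit word from the two 32-bit loads.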
$code.=<<___;
	li	$t0,`$rounds/16-1`
	mtctr	$t0
.align	4
Lrounds:
	addi	$Tbl,$Tbl,`16*$SZ`
___
for(;$i<32;$i++) {
	&ROUND_16_xx($i,@V);
	unshift(@V,pop(@V));
}
$code.=<<___;
	bdnz-	Lrounds

	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	$POP	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	subi	$Tbl,$Tbl,`($rounds-16)*$SZ`	; rewind Tbl

	$LD	r16,`0*$SZ`($ctx)
	$LD	r17,`1*$SZ`($ctx)
	$LD	r18,`2*$SZ`($ctx)
	$LD	r19,`3*$SZ`($ctx)
	$LD	r20,`4*$SZ`($ctx)
	$LD	r21,`5*$SZ`($ctx)
	$LD	r22,`6*$SZ`($ctx)
	addi	$inp,$inp,`16*$SZ`		; advance inp
	$LD	r23,`7*$SZ`($ctx)
	add	$A,$A,r16
	add	$B,$B,r17
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)
	add	$C,$C,r18
	$ST	$A,`0*$SZ`($ctx)
	add	$D,$D,r19
	$ST	$B,`1*$SZ`($ctx)
	add	$E,$E,r20
	$ST	$C,`2*$SZ`($ctx)
	add	$F,$F,r21
	$ST	$D,`3*$SZ`($ctx)
	add	$G,$G,r22
	$ST	$E,`4*$SZ`($ctx)
	add	$H,$H,r23
	$ST	$F,`5*$SZ`($ctx)
	$ST	$G,`6*$SZ`($ctx)
	$UCMP	$inp,$num
	$ST	$H,`7*$SZ`($ctx)
	bne	Lsha2_block_private
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
___
} else {
########################################################################
# SHA512 for PPC32, X vector is off-loaded to stack...
#
#			|	sha512
#			|	-m32
# ----------------------+-----------------------
# PPC74x0,gcc-4.0.1	|	+48%
# POWER6,gcc-4.4.6	|	+124%(*)
# POWER7,gcc-4.4.6	|	+79%(*)
# e300,gcc-4.1.0	|	+167%
#
# (*)	~1/3 of -m64 result [and ~20% better than -m32 code generated
#	by xlc-12.1]

my $XOFF=$LOCALS;

my @V=map("r$_",(16..31));	# A..H

my ($s0,$s1,$t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("r$_",(0,5,6,8..12,14,15));
my ($x0,$x1)=("r3","r4");	# zaps $ctx and $inp

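# In this path every 64-bit quantity lives in a hi/lo pair of 32-bit
# registers, and 64-bit additions are chained through the carry bit,
# e.g. (illustrative operand names):
#
#	addc	rlo,alo,blo		; low halves, sets XER[CA]
#	adde	rhi,ahi,bhi		; high halves plus carry
#
# which is exactly the addc/adde pattern used throughout below.
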
sub ROUND_00_15_ppc32 {
my ($i, $ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
	$ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;

$code.=<<___;
	lwz	$t2,`$SZ*($i%16)+4`($Tbl)
	 xor	$a0,$flo,$glo
	lwz	$t3,`$SZ*($i%16)+0`($Tbl)
	 xor	$a1,$fhi,$ghi
	addc	$hlo,$hlo,$t0			; h+=x[i]
	stw	$t0,`$XOFF+0+$SZ*($i%16)`($sp)	; save x[i]

	srwi	$s0,$elo,$Sigma1[0]
	srwi	$s1,$ehi,$Sigma1[0]
	 and	$a0,$a0,$elo
	adde	$hhi,$hhi,$t1
	 and	$a1,$a1,$ehi
	stw	$t1,`$XOFF+4+$SZ*($i%16)`($sp)
	srwi	$t0,$elo,$Sigma1[1]
	srwi	$t1,$ehi,$Sigma1[1]
	 addc	$hlo,$hlo,$t2			; h+=K512[i]
	insrwi	$s0,$ehi,$Sigma1[0],0
	insrwi	$s1,$elo,$Sigma1[0],0
	 xor	$a0,$a0,$glo			; Ch(e,f,g)
	 adde	$hhi,$hhi,$t3
	 xor	$a1,$a1,$ghi
	insrwi	$t0,$ehi,$Sigma1[1],0
	insrwi	$t1,$elo,$Sigma1[1],0
	 addc	$hlo,$hlo,$a0			; h+=Ch(e,f,g)
	srwi	$t2,$ehi,$Sigma1[2]-32
	srwi	$t3,$elo,$Sigma1[2]-32
	xor	$s0,$s0,$t0
	xor	$s1,$s1,$t1
	insrwi	$t2,$elo,$Sigma1[2]-32,0
	insrwi	$t3,$ehi,$Sigma1[2]-32,0
	 xor	$a0,$alo,$blo			; a^b, b^c in next round
	 adde	$hhi,$hhi,$a1
	 xor	$a1,$ahi,$bhi
	xor	$s0,$s0,$t2			; Sigma1(e)
	xor	$s1,$s1,$t3

	srwi	$t0,$alo,$Sigma0[0]
	 and	$a2,$a2,$a0
	 addc	$hlo,$hlo,$s0			; h+=Sigma1(e)
	 and	$a3,$a3,$a1
	srwi	$t1,$ahi,$Sigma0[0]
	srwi	$s0,$ahi,$Sigma0[1]-32
	 adde	$hhi,$hhi,$s1
	srwi	$s1,$alo,$Sigma0[1]-32
	insrwi	$t0,$ahi,$Sigma0[0],0
	insrwi	$t1,$alo,$Sigma0[0],0
	 xor	$a2,$a2,$blo			; Maj(a,b,c)
	 addc	$dlo,$dlo,$hlo			; d+=h
	 xor	$a3,$a3,$bhi
	insrwi	$s0,$alo,$Sigma0[1]-32,0
	insrwi	$s1,$ahi,$Sigma0[1]-32,0
	 adde	$dhi,$dhi,$hhi
	srwi	$t2,$ahi,$Sigma0[2]-32
	srwi	$t3,$alo,$Sigma0[2]-32
	xor	$s0,$s0,$t0
	 addc	$hlo,$hlo,$a2			; h+=Maj(a,b,c)
	xor	$s1,$s1,$t1
	insrwi	$t2,$alo,$Sigma0[2]-32,0
	insrwi	$t3,$ahi,$Sigma0[2]-32,0
	 adde	$hhi,$hhi,$a3
___
$code.=<<___ if ($i>=15);
	lwz	$t0,`$XOFF+0+$SZ*(($i+2)%16)`($sp)
	lwz	$t1,`$XOFF+4+$SZ*(($i+2)%16)`($sp)
___
$code.=<<___ if ($i<15);
	lwz	$t1,`$SZ*($i+1)+0`($inp)
	lwz	$t0,`$SZ*($i+1)+4`($inp)
___
$code.=<<___;
	xor	$s0,$s0,$t2			; Sigma0(a)
	xor	$s1,$s1,$t3
	addc	$hlo,$hlo,$s0			; h+=Sigma0(a)
	adde	$hhi,$hhi,$s1
___
$code.=<<___ if ($i==15);
	lwz	$x0,`$XOFF+0+$SZ*(($i+1)%16)`($sp)
	lwz	$x1,`$XOFF+4+$SZ*(($i+1)%16)`($sp)
___
}
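# The srwi/insrwi pairs above synthesize 64-bit rotates from 32-bit
# halves: rotating (hi,lo) right by n < 32 gives
#
#	lo' = (lo >> n) | (hi << (32-n))
#	hi' = (hi >> n) | (lo << (32-n))
#
# and for n >= 32 the same pattern applies with the halves swapped and
# a shift count of n-32 (hence the $Sigma[x]-32 expressions).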
sub ROUND_16_xx_ppc32 {
my ($i, $ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
	$ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;

$code.=<<___;
	srwi	$s0,$t0,$sigma0[0]
	srwi	$s1,$t1,$sigma0[0]
	srwi	$t2,$t0,$sigma0[1]
	srwi	$t3,$t1,$sigma0[1]
	insrwi	$s0,$t1,$sigma0[0],0
	insrwi	$s1,$t0,$sigma0[0],0
	srwi	$a0,$t0,$sigma0[2]
	insrwi	$t2,$t1,$sigma0[1],0
	insrwi	$t3,$t0,$sigma0[1],0
	insrwi	$a0,$t1,$sigma0[2],0
	xor	$s0,$s0,$t2
	 lwz	$t2,`$XOFF+0+$SZ*(($i+14)%16)`($sp)
	srwi	$a1,$t1,$sigma0[2]
	xor	$s1,$s1,$t3
	 lwz	$t3,`$XOFF+4+$SZ*(($i+14)%16)`($sp)
	xor	$a0,$a0,$s0
	 srwi	$s0,$t2,$sigma1[0]
	xor	$a1,$a1,$s1
	 srwi	$s1,$t3,$sigma1[0]
	addc	$x0,$x0,$a0			; x[i]+=sigma0(x[i+1])
	 srwi	$a0,$t3,$sigma1[1]-32
	insrwi	$s0,$t3,$sigma1[0],0
	insrwi	$s1,$t2,$sigma1[0],0
	adde	$x1,$x1,$a1
	 srwi	$a1,$t2,$sigma1[1]-32

	insrwi	$a0,$t2,$sigma1[1]-32,0
	srwi	$t2,$t2,$sigma1[2]
	insrwi	$a1,$t3,$sigma1[1]-32,0
	insrwi	$t2,$t3,$sigma1[2],0
	xor	$s0,$s0,$a0
	 lwz	$a0,`$XOFF+0+$SZ*(($i+9)%16)`($sp)
	srwi	$t3,$t3,$sigma1[2]
	xor	$s1,$s1,$a1
	 lwz	$a1,`$XOFF+4+$SZ*(($i+9)%16)`($sp)
	xor	$s0,$s0,$t2
	 addc	$x0,$x0,$a0			; x[i]+=x[i+9]
	xor	$s1,$s1,$t3
	 adde	$x1,$x1,$a1
	addc	$x0,$x0,$s0			; x[i]+=sigma1(x[i+14])
	adde	$x1,$x1,$s1
___
	($t0,$t1,$x0,$x1) = ($x0,$x1,$t0,$t1);
	&ROUND_00_15_ppc32(@_);
}

$code.=<<___;
.align	4
Lsha2_block_private:
	lwz	$t1,0($inp)
	xor	$a2,@V[3],@V[5]		; B^C, magic seed
	lwz	$t0,4($inp)
	xor	$a3,@V[2],@V[4]
___
for($i=0;$i<16;$i++) {
	&ROUND_00_15_ppc32($i,@V);
	unshift(@V,pop(@V));	unshift(@V,pop(@V));
	($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
}
$code.=<<___;
	li	$a0,`$rounds/16-1`
	mtctr	$a0
.align	4
Lrounds:
	addi	$Tbl,$Tbl,`16*$SZ`
___
for(;$i<32;$i++) {
	&ROUND_16_xx_ppc32($i,@V);
	unshift(@V,pop(@V));	unshift(@V,pop(@V));
	($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
}
$code.=<<___;
	bdnz-	Lrounds

	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	$POP	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	subi	$Tbl,$Tbl,`($rounds-16)*$SZ`	; rewind Tbl

	lwz	$t0,0($ctx)
	lwz	$t1,4($ctx)
	lwz	$t2,8($ctx)
	lwz	$t3,12($ctx)
	lwz	$a0,16($ctx)
	lwz	$a1,20($ctx)
	lwz	$a2,24($ctx)
	addc	@V[1],@V[1],$t1
	lwz	$a3,28($ctx)
	adde	@V[0],@V[0],$t0
	lwz	$t0,32($ctx)
	addc	@V[3],@V[3],$t3
	lwz	$t1,36($ctx)
	adde	@V[2],@V[2],$t2
	lwz	$t2,40($ctx)
	addc	@V[5],@V[5],$a1
	lwz	$t3,44($ctx)
	adde	@V[4],@V[4],$a0
	lwz	$a0,48($ctx)
	addc	@V[7],@V[7],$a3
	lwz	$a1,52($ctx)
	adde	@V[6],@V[6],$a2
	lwz	$a2,56($ctx)
	addc	@V[9],@V[9],$t1
	lwz	$a3,60($ctx)
	adde	@V[8],@V[8],$t0
	stw	@V[0],0($ctx)
	stw	@V[1],4($ctx)
	addc	@V[11],@V[11],$t3
	stw	@V[2],8($ctx)
	stw	@V[3],12($ctx)
	adde	@V[10],@V[10],$t2
	stw	@V[4],16($ctx)
	stw	@V[5],20($ctx)
	addc	@V[13],@V[13],$a1
	stw	@V[6],24($ctx)
	stw	@V[7],28($ctx)
	adde	@V[12],@V[12],$a0
	stw	@V[8],32($ctx)
	stw	@V[9],36($ctx)
	addc	@V[15],@V[15],$a3
	stw	@V[10],40($ctx)
	stw	@V[11],44($ctx)
	adde	@V[14],@V[14],$a2
	stw	@V[12],48($ctx)
	stw	@V[13],52($ctx)
	stw	@V[14],56($ctx)
	stw	@V[15],60($ctx)

	addi	$inp,$inp,`16*$SZ`		; advance inp
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)
	$UCMP	$inp,$num
	bne	Lsha2_block_private
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
___
}


# Ugly hack here, because PPC assembler syntax seems to vary too much
# from platform to platform...
$code.=<<___;
.align	6
LPICmeup:
	mflr	r0
	bcl	20,31,\$+4
	mflr	$Tbl	; vvvvvv "distance" between . and 1st data entry
	addi	$Tbl,$Tbl,`64-8`
	mtlr	r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
	.space	`64-9*4`
___
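# A note on LPICmeup (not from the original commentary): bcl 20,31 is
# an unconditional branch-and-link in the form that avoids disturbing
# the link-register prediction stack, so the mflr that follows yields
# the address of the instruction after the bcl. The constant table
# starts at the next 64-byte boundary, i.e. 64-8 bytes past that
# address, with .space padding the routine's 9 words (36 bytes) out
# to 64.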
$code.=<<___ if ($SZ==8);
	.long	0x428a2f98,0xd728ae22,0x71374491,0x23ef65cd
	.long	0xb5c0fbcf,0xec4d3b2f,0xe9b5dba5,0x8189dbbc
	.long	0x3956c25b,0xf348b538,0x59f111f1,0xb605d019
	.long	0x923f82a4,0xaf194f9b,0xab1c5ed5,0xda6d8118
	.long	0xd807aa98,0xa3030242,0x12835b01,0x45706fbe
	.long	0x243185be,0x4ee4b28c,0x550c7dc3,0xd5ffb4e2
	.long	0x72be5d74,0xf27b896f,0x80deb1fe,0x3b1696b1
	.long	0x9bdc06a7,0x25c71235,0xc19bf174,0xcf692694
	.long	0xe49b69c1,0x9ef14ad2,0xefbe4786,0x384f25e3
	.long	0x0fc19dc6,0x8b8cd5b5,0x240ca1cc,0x77ac9c65
	.long	0x2de92c6f,0x592b0275,0x4a7484aa,0x6ea6e483
	.long	0x5cb0a9dc,0xbd41fbd4,0x76f988da,0x831153b5
	.long	0x983e5152,0xee66dfab,0xa831c66d,0x2db43210
	.long	0xb00327c8,0x98fb213f,0xbf597fc7,0xbeef0ee4
	.long	0xc6e00bf3,0x3da88fc2,0xd5a79147,0x930aa725
	.long	0x06ca6351,0xe003826f,0x14292967,0x0a0e6e70
	.long	0x27b70a85,0x46d22ffc,0x2e1b2138,0x5c26c926
	.long	0x4d2c6dfc,0x5ac42aed,0x53380d13,0x9d95b3df
	.long	0x650a7354,0x8baf63de,0x766a0abb,0x3c77b2a8
	.long	0x81c2c92e,0x47edaee6,0x92722c85,0x1482353b
	.long	0xa2bfe8a1,0x4cf10364,0xa81a664b,0xbc423001
	.long	0xc24b8b70,0xd0f89791,0xc76c51a3,0x0654be30
	.long	0xd192e819,0xd6ef5218,0xd6990624,0x5565a910
	.long	0xf40e3585,0x5771202a,0x106aa070,0x32bbd1b8
	.long	0x19a4c116,0xb8d2d0c8,0x1e376c08,0x5141ab53
	.long	0x2748774c,0xdf8eeb99,0x34b0bcb5,0xe19b48a8
	.long	0x391c0cb3,0xc5c95a63,0x4ed8aa4a,0xe3418acb
	.long	0x5b9cca4f,0x7763e373,0x682e6ff3,0xd6b2b8a3
	.long	0x748f82ee,0x5defb2fc,0x78a5636f,0x43172f60
	.long	0x84c87814,0xa1f0ab72,0x8cc70208,0x1a6439ec
	.long	0x90befffa,0x23631e28,0xa4506ceb,0xde82bde9
	.long	0xbef9a3f7,0xb2c67915,0xc67178f2,0xe372532b
	.long	0xca273ece,0xea26619c,0xd186b8c7,0x21c0c207
	.long	0xeada7dd6,0xcde0eb1e,0xf57d4f7f,0xee6ed178
	.long	0x06f067aa,0x72176fba,0x0a637dc5,0xa2c898a6
	.long	0x113f9804,0xbef90dae,0x1b710b35,0x131c471b
	.long	0x28db77f5,0x23047d84,0x32caab7b,0x40c72493
	.long	0x3c9ebe0a,0x15c9bebc,0x431d67c4,0x9c100d4c
	.long	0x4cc5d4be,0xcb3e42b6,0x597f299c,0xfc657e2a
	.long	0x5fcb6fab,0x3ad6faec,0x6c44198c,0x4a475817
___
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT;