PPC assembly pack: make new .size directives profiler-friendly.
crypto/sha/asm/sha512-ppc.pl
#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# I let hardware handle unaligned input, except on page boundaries
# (see below for details). Otherwise it's a straightforward
# implementation with the X vector kept in the register bank. The
# module is big-endian [which is no big deal, as there are no
# little-endian targets left around].

#			sha256		|	sha512
#			-m64	-m32	|	-m64	-m32
# --------------------------------------+-----------------------
# PPC970,gcc-4.0.0	+50%	+38%	|	+40%	+410%(*)
# Power6,xlc-7		+150%	+90%	|	+100%	+430%(*)
#
# (*)	64-bit code in 32-bit application context, which actually is
#	on the TODO list. It should be noted that for safe deployment
#	in a 32-bit *multi-threaded* context asynchronous signals
#	should be blocked upon entry to the SHA512 block routine. This
#	is because the 32-bit signaling procedure invalidates upper
#	halves of GPRs. The context switch procedure preserves them,
#	but not signaling:-(

# The second version is truly multi-thread safe. The trouble with the
# original version was that it used the thread-local storage pointer
# register. It scrupulously preserved it, but the problem would arise
# the moment an asynchronous signal was delivered and the signal
# handler dereferenced the TLS pointer. While this is never the case
# in the openssl application or test suite, we have to respect this
# scenario and not use the TLS pointer register. The alternative would
# be to require the caller to block signals prior to calling this
# routine. For the record, in 32-bit context R2 serves as the TLS
# pointer, while in 64-bit context it's R13.

$flavour=shift;
$output =shift;

if ($flavour =~ /64/) {
	$SIZE_T=8;
	$LRSAVE=2*$SIZE_T;
	$STU="stdu";
	$UCMP="cmpld";
	$SHL="sldi";
	$POP="ld";
	$PUSH="std";
} elsif ($flavour =~ /32/) {
	$SIZE_T=4;
	$LRSAVE=$SIZE_T;
	$STU="stwu";
	$UCMP="cmplw";
	$SHL="slwi";
	$POP="lwz";
	$PUSH="stw";
} else { die "nonsense $flavour"; }
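
# A typical invocation (file names are illustrative; flavours follow
# the ppc-xlate.pl convention, e.g. linux32, linux64, aix64, osx64):
#
#	perl sha512-ppc.pl linux64 sha512-ppc.s		# 64-bit SHA-512
#	perl sha512-ppc.pl linux32 sha256-ppc.s		# 32-bit SHA-256
#
# Note that it's the output file name that selects the algorithm: any
# name matching /512/ yields sha512_block_data_order, anything else
# sha256_block_data_order (see below).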

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

# "or" rather than "||" is essential here: "||" binds to the pipe
# string, which is always true, so open's result would go unchecked.
open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";

if ($output =~ /512/) {
	$func="sha512_block_data_order";
	$SZ=8;
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1,  8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
	$LD="ld";
	$ST="std";
	$ROR="rotrdi";
	$SHR="srdi";
} else {
	$func="sha256_block_data_order";
	$SZ=4;
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
	$LD="lwz";
	$ST="stw";
	$ROR="rotrwi";
	$SHR="srwi";
}

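# For reference, these rotation/shift amounts instantiate the FIPS
# 180-4 functions; e.g. for SHA-512:
#
#	Sigma0(x) = ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39)
#	Sigma1(x) = ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41)
#	sigma0(x) = ROTR(x,1)  ^ ROTR(x,8)  ^ SHR(x,7)
#	sigma1(x) = ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6)
#
# The third element of @sigma0/@sigma1 is a plain shift, not a rotate.
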
$FRAME=32*$SIZE_T+16*$SZ;
$LOCALS=6*$SIZE_T;

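# For the 64-bit SHA-512 build, for example, $FRAME = 32*8+16*8 = 384
# bytes and $LOCALS = 48; the 16*$SZ bytes at offset $LOCALS are the
# aligned bounce buffer used for input blocks that cross a page
# boundary (see Lmemcpy below).
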
$sp ="r1";
$toc="r2";
$ctx="r3";	# zapped by $a0
$inp="r4";	# zapped by $a1
$num="r5";	# zapped by $t0

$T  ="r0";
$a0 ="r3";
$a1 ="r4";
$t0 ="r5";
$t1 ="r6";
$Tbl="r7";

$A  ="r8";
$B  ="r9";
$C  ="r10";
$D  ="r11";
$E  ="r12";
$F  =$t1;	$t1 = "r0";	# stay away from "r13";
$G  ="r14";
$H  ="r15";

@V=($A,$B,$C,$D,$E,$F,$G,$H);
@X=("r16","r17","r18","r19","r20","r21","r22","r23",
    "r24","r25","r26","r27","r28","r29","r30","r31");

$inp="r31" if($SZ==4 || $SIZE_T==8);	# reassigned $inp! aliases with @X[15]

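# Note on the $inp reassignment above: r31 doubles as @X[15]. The input
# pointer is only needed for the 16 initial message loads, the last of
# which clobbers it; the copy saved on the stack is reloaded at the end
# of each block (see Lsha2_block_private).
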
sub ROUND_00_15 {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
$code.=<<___;
	$ROR	$a0,$e,$Sigma1[0]
	$ROR	$a1,$e,$Sigma1[1]
	and	$t0,$f,$e
	xor	$a0,$a0,$a1
	add	$h,$h,$t1
	andc	$t1,$g,$e
	$ROR	$a1,$a1,`$Sigma1[2]-$Sigma1[1]`
	or	$t0,$t0,$t1		; Ch(e,f,g)
	add	$h,$h,@X[$i%16]
	xor	$a0,$a0,$a1		; Sigma1(e)
	add	$h,$h,$t0
	add	$h,$h,$a0

	$ROR	$a0,$a,$Sigma0[0]
	$ROR	$a1,$a,$Sigma0[1]
	and	$t0,$a,$b
	and	$t1,$a,$c
	xor	$a0,$a0,$a1
	$ROR	$a1,$a1,`$Sigma0[2]-$Sigma0[1]`
	xor	$t0,$t0,$t1
	and	$t1,$b,$c
	xor	$a0,$a0,$a1		; Sigma0(a)
	add	$d,$d,$h
	xor	$t0,$t0,$t1		; Maj(a,b,c)
___
$code.=<<___ if ($i<15);
	$LD	$t1,`($i+1)*$SZ`($Tbl)
___
$code.=<<___;
	add	$h,$h,$a0
	add	$h,$h,$t0

___
}

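# The round body above computes Ch(e,f,g) = (e&f)|(~e&g) via
# and/andc/or, and Maj(a,b,c) = (a&b)^(a&c)^(b&c) via three ands and
# two xors. Note also that the second rotation result is reused: the
# extra $ROR by Sigma1[2]-Sigma1[1] turns ROTR(e,Sigma1[1]) into
# ROTR(e,Sigma1[2]), saving one instruction. Pure-Perl cross-check
# helpers (hypothetical, not used by the generator; they assume a
# 64-bit perl so native integer ops are wide enough):
#
#	sub Ch  { my ($e,$f,$g)=@_; ($e & $f) | (~$e & $g) }
#	sub Maj { my ($a,$b,$c)=@_; ($a & $b) ^ ($a & $c) ^ ($b & $c) }
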
sub ROUND_16_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
$i-=16;
$code.=<<___;
	$ROR	$a0,@X[($i+1)%16],$sigma0[0]
	$ROR	$a1,@X[($i+1)%16],$sigma0[1]
	$ROR	$t0,@X[($i+14)%16],$sigma1[0]
	$ROR	$t1,@X[($i+14)%16],$sigma1[1]
	xor	$a0,$a0,$a1
	$SHR	$a1,@X[($i+1)%16],$sigma0[2]
	xor	$t0,$t0,$t1
	$SHR	$t1,@X[($i+14)%16],$sigma1[2]
	add	@X[$i],@X[$i],@X[($i+9)%16]
	xor	$a0,$a0,$a1		; sigma0(X[(i+1)&0x0f])
	xor	$t0,$t0,$t1		; sigma1(X[(i+14)&0x0f])
	$LD	$t1,`$i*$SZ`($Tbl)
	add	@X[$i],@X[$i],$a0
	add	@X[$i],@X[$i],$t0
___
&ROUND_00_15($i+16,$a,$b,$c,$d,$e,$f,$g,$h);
}

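# ROUND_16_xx implements the message schedule update
#
#	X[i%16] += sigma0(X[(i+1)%16]) + sigma1(X[(i+14)%16]) + X[(i+9)%16]
#
# and then falls through to the plain round body above with the freshly
# expanded word.
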
$code=<<___;
.machine	"any"
.text

.globl	$func
.align	6
$func:
	$STU	$sp,-$FRAME($sp)
	mflr	r0
	$SHL	$num,$num,`log(16*$SZ)/log(2)`

	$PUSH	$ctx,`$FRAME-$SIZE_T*22`($sp)

	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
___

if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
	$LD	$A,`0*$SZ`($ctx)
	mr	$inp,r4				; incarnate $inp
	$LD	$B,`1*$SZ`($ctx)
	$LD	$C,`2*$SZ`($ctx)
	$LD	$D,`3*$SZ`($ctx)
	$LD	$E,`4*$SZ`($ctx)
	$LD	$F,`5*$SZ`($ctx)
	$LD	$G,`6*$SZ`($ctx)
	$LD	$H,`7*$SZ`($ctx)
___
} else {
  for ($i=16;$i<32;$i++) {
    $code.=<<___;
	lwz	r$i,`4*($i-16)`($ctx)
___
  }
}

$code.=<<___;
	bl	LPICmeup
LPICedup:
	andi.	r0,$inp,3
	bne	Lunaligned
Laligned:
	add	$num,$inp,$num
	$PUSH	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	b	Ldone

; The PowerPC specification allows an implementation to be ill-behaved
; upon an unaligned access which crosses a page boundary. The "better
; safe than sorry" principle makes me treat such input specially. I
; don't look for the particular offending word, but rather for an
; input block which crosses the boundary. Once found, that block is
; copied to an aligned buffer and hashed separately...
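; A worked example for SHA-512, where the block size 16*SZ is 128:
; with inp at page offset 4080, subfic below yields 4096-4080 = 16,
; and 16 & 3968 = 0, i.e. the very first block already straddles the
; boundary, so control falls through to Lcross_page.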
.align	4
Lunaligned:
	subfic	$t1,$inp,4096
	andi.	$t1,$t1,`4096-16*$SZ`	; distance to closest page boundary
	beq	Lcross_page
	$UCMP	$num,$t1
	ble-	Laligned		; didn't cross the page boundary
	subfc	$num,$t1,$num
	add	$t1,$inp,$t1
	$PUSH	$num,`$FRAME-$SIZE_T*25`($sp)	; save real remaining num
	$PUSH	$t1,`$FRAME-$SIZE_T*24`($sp)	; intermediate end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	; $inp equals the intermediate end pointer here
	$POP	$num,`$FRAME-$SIZE_T*25`($sp)	; restore real remaining num
Lcross_page:
	li	$t1,`16*$SZ/4`
	mtctr	$t1
___
if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
	addi	r20,$sp,$LOCALS			; aligned spot below the frame
Lmemcpy:
	lbz	r16,0($inp)
	lbz	r17,1($inp)
	lbz	r18,2($inp)
	lbz	r19,3($inp)
	addi	$inp,$inp,4
	stb	r16,0(r20)
	stb	r17,1(r20)
	stb	r18,2(r20)
	stb	r19,3(r20)
	addi	r20,r20,4
	bdnz	Lmemcpy
___
} else {
$code.=<<___;
	addi	r12,$sp,$LOCALS			; aligned spot below the frame
Lmemcpy:
	lbz	r8,0($inp)
	lbz	r9,1($inp)
	lbz	r10,2($inp)
	lbz	r11,3($inp)
	addi	$inp,$inp,4
	stb	r8,0(r12)
	stb	r9,1(r12)
	stb	r10,2(r12)
	stb	r11,3(r12)
	addi	r12,r12,4
	bdnz	Lmemcpy
___
}

$code.=<<___;
	$PUSH	$inp,`$FRAME-$SIZE_T*26`($sp)	; save real inp
	addi	$t1,$sp,`$LOCALS+16*$SZ`	; fictitious end pointer
	addi	$inp,$sp,$LOCALS		; fictitious inp pointer
	$PUSH	$num,`$FRAME-$SIZE_T*25`($sp)	; save real num
	$PUSH	$t1,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	$POP	$inp,`$FRAME-$SIZE_T*26`($sp)	; restore real inp
	$POP	$num,`$FRAME-$SIZE_T*25`($sp)	; restore real num
	addic.	$num,$num,`-16*$SZ`		; num--
	bne-	Lunaligned

Ldone:
	$POP	r0,`$FRAME+$LRSAVE`($sp)
	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,4,1,0x80,18,3,0
	.long	0
___

if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
.align	4
Lsha2_block_private:
	$LD	$t1,0($Tbl)
___
for($i=0;$i<16;$i++) {
$code.=<<___ if ($SZ==4);
	lwz	@X[$i],`$i*$SZ`($inp)
___
# 64-bit loads are split into two 32-bit ones, as the CPU can't handle
# unaligned 64-bit loads, only 32-bit ones...
$code.=<<___ if ($SZ==8);
	lwz	$t0,`$i*$SZ`($inp)
	lwz	@X[$i],`$i*$SZ+4`($inp)
	insrdi	@X[$i],$t0,32,0
___
	&ROUND_00_15($i,@V);
	unshift(@V,pop(@V));
}
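# (insrdi @X[$i],$t0,32,0 inserts the 32 low-order bits of $t0 into
# bits 0..31 of @X[$i], i.e. its most significant half in PPC bit
# numbering, merging the two 32-bit loads into one big-endian 64-bit
# word.)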
$code.=<<___;
	li	$t0,`$rounds/16-1`
	mtctr	$t0
.align	4
Lrounds:
	addi	$Tbl,$Tbl,`16*$SZ`
___
for(;$i<32;$i++) {
	&ROUND_16_xx($i,@V);
	unshift(@V,pop(@V));
}
$code.=<<___;
	bdnz-	Lrounds

	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	$POP	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	subi	$Tbl,$Tbl,`($rounds-16)*$SZ`	; rewind Tbl

	$LD	r16,`0*$SZ`($ctx)
	$LD	r17,`1*$SZ`($ctx)
	$LD	r18,`2*$SZ`($ctx)
	$LD	r19,`3*$SZ`($ctx)
	$LD	r20,`4*$SZ`($ctx)
	$LD	r21,`5*$SZ`($ctx)
	$LD	r22,`6*$SZ`($ctx)
	addi	$inp,$inp,`16*$SZ`		; advance inp
	$LD	r23,`7*$SZ`($ctx)
	add	$A,$A,r16
	add	$B,$B,r17
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)
	add	$C,$C,r18
	$ST	$A,`0*$SZ`($ctx)
	add	$D,$D,r19
	$ST	$B,`1*$SZ`($ctx)
	add	$E,$E,r20
	$ST	$C,`2*$SZ`($ctx)
	add	$F,$F,r21
	$ST	$D,`3*$SZ`($ctx)
	add	$G,$G,r22
	$ST	$E,`4*$SZ`($ctx)
	add	$H,$H,r23
	$ST	$F,`5*$SZ`($ctx)
	$ST	$G,`6*$SZ`($ctx)
	$UCMP	$inp,$num
	$ST	$H,`7*$SZ`($ctx)
	bne	Lsha2_block_private
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
.size	$func,.-$func
___
} else {
########################################################################
# SHA512 for PPC32, X vector is off-loaded to stack...
#
#			|	sha512
#			|	-m32
# ----------------------+-----------------------
# PPC74x0,gcc-4.0.1	|	+48%
# POWER6,gcc-4.4.6	|	+124%(*)
# POWER7,gcc-4.4.6	|	+79%(*)
# e300,gcc-4.1.0	|	+167%
#
# (*)	~1/3 of -m64 result [and ~20% better than -m32 code generated
#	by xlc-12.1]

my $XOFF=$LOCALS;

my @V=map("r$_",(16..31));	# A..H

my ($s0,$s1,$t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("r$_",(0,5,6,8..12,14,15));
my ($x0,$x1)=("r3","r4");	# zaps $ctx and $inp

sub ROUND_00_15_ppc32 {
my ($i,	$ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
	$ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;

$code.=<<___;
	lwz	$t2,`$SZ*($i%16)+4`($Tbl)
	 xor	$a0,$flo,$glo
	lwz	$t3,`$SZ*($i%16)+0`($Tbl)
	 xor	$a1,$fhi,$ghi
	addc	$hlo,$hlo,$t0			; h+=x[i]
	stw	$t0,`$XOFF+0+$SZ*($i%16)`($sp)	; save x[i]

	srwi	$s0,$elo,$Sigma1[0]
	srwi	$s1,$ehi,$Sigma1[0]
	 and	$a0,$a0,$elo
	adde	$hhi,$hhi,$t1
	 and	$a1,$a1,$ehi
	stw	$t1,`$XOFF+4+$SZ*($i%16)`($sp)
	srwi	$t0,$elo,$Sigma1[1]
	srwi	$t1,$ehi,$Sigma1[1]
	 addc	$hlo,$hlo,$t2			; h+=K512[i]
	insrwi	$s0,$ehi,$Sigma1[0],0
	insrwi	$s1,$elo,$Sigma1[0],0
	 xor	$a0,$a0,$glo			; Ch(e,f,g)
	 adde	$hhi,$hhi,$t3
	 xor	$a1,$a1,$ghi
	insrwi	$t0,$ehi,$Sigma1[1],0
	insrwi	$t1,$elo,$Sigma1[1],0
	 addc	$hlo,$hlo,$a0			; h+=Ch(e,f,g)
	srwi	$t2,$ehi,$Sigma1[2]-32
	srwi	$t3,$elo,$Sigma1[2]-32
	xor	$s0,$s0,$t0
	xor	$s1,$s1,$t1
	insrwi	$t2,$elo,$Sigma1[2]-32,0
	insrwi	$t3,$ehi,$Sigma1[2]-32,0
	 xor	$a0,$alo,$blo			; a^b, b^c in next round
	 adde	$hhi,$hhi,$a1
	 xor	$a1,$ahi,$bhi
	xor	$s0,$s0,$t2			; Sigma1(e)
	xor	$s1,$s1,$t3

	srwi	$t0,$alo,$Sigma0[0]
	 and	$a2,$a2,$a0
	 addc	$hlo,$hlo,$s0			; h+=Sigma1(e)
	 and	$a3,$a3,$a1
	srwi	$t1,$ahi,$Sigma0[0]
	srwi	$s0,$ahi,$Sigma0[1]-32
	 adde	$hhi,$hhi,$s1
	srwi	$s1,$alo,$Sigma0[1]-32
	insrwi	$t0,$ahi,$Sigma0[0],0
	insrwi	$t1,$alo,$Sigma0[0],0
	 xor	$a2,$a2,$blo			; Maj(a,b,c)
	 addc	$dlo,$dlo,$hlo			; d+=h
	 xor	$a3,$a3,$bhi
	insrwi	$s0,$alo,$Sigma0[1]-32,0
	insrwi	$s1,$ahi,$Sigma0[1]-32,0
	 adde	$dhi,$dhi,$hhi
	srwi	$t2,$ahi,$Sigma0[2]-32
	srwi	$t3,$alo,$Sigma0[2]-32
	xor	$s0,$s0,$t0
	 addc	$hlo,$hlo,$a2			; h+=Maj(a,b,c)
	xor	$s1,$s1,$t1
	insrwi	$t2,$alo,$Sigma0[2]-32,0
	insrwi	$t3,$ahi,$Sigma0[2]-32,0
	 adde	$hhi,$hhi,$a3
___
$code.=<<___ if ($i>=15);
	lwz	$t0,`$XOFF+0+$SZ*(($i+2)%16)`($sp)
	lwz	$t1,`$XOFF+4+$SZ*(($i+2)%16)`($sp)
___
$code.=<<___ if ($i<15);
	lwz	$t1,`$SZ*($i+1)+0`($inp)
	lwz	$t0,`$SZ*($i+1)+4`($inp)
___
$code.=<<___;
	xor	$s0,$s0,$t2			; Sigma0(a)
	xor	$s1,$s1,$t3
	addc	$hlo,$hlo,$s0			; h+=Sigma0(a)
	adde	$hhi,$hhi,$s1
___
$code.=<<___ if ($i==15);
	lwz	$x0,`$XOFF+0+$SZ*(($i+1)%16)`($sp)
	lwz	$x1,`$XOFF+4+$SZ*(($i+1)%16)`($sp)
___
}
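# On 32-bit PPC every 64-bit quantity lives in a hi:lo register pair.
# 64-bit additions are performed as addc (add, set carry) on the low
# halves followed by adde (add with carry) on the high ones, and a
# 64-bit rotate by n<32 becomes a srwi/insrwi pair per half, e.g.
#
#	srwi	s0,elo,n; insrwi s0,ehi,n,0	# low  half of rotr64(e,n)
#	srwi	s1,ehi,n; insrwi s1,elo,n,0	# high half of rotr64(e,n)
#
# (rotates by n>=32 swap the roles of the halves and use n-32, as in
# the $Sigma1[2]-32 cases above).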
sub ROUND_16_xx_ppc32 {
my ($i,	$ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
	$ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;

$code.=<<___;
	srwi	$s0,$t0,$sigma0[0]
	srwi	$s1,$t1,$sigma0[0]
	srwi	$t2,$t0,$sigma0[1]
	srwi	$t3,$t1,$sigma0[1]
	insrwi	$s0,$t1,$sigma0[0],0
	insrwi	$s1,$t0,$sigma0[0],0
	srwi	$a0,$t0,$sigma0[2]
	insrwi	$t2,$t1,$sigma0[1],0
	insrwi	$t3,$t0,$sigma0[1],0
	insrwi	$a0,$t1,$sigma0[2],0
	xor	$s0,$s0,$t2
	 lwz	$t2,`$XOFF+0+$SZ*(($i+14)%16)`($sp)
	srwi	$a1,$t1,$sigma0[2]
	xor	$s1,$s1,$t3
	 lwz	$t3,`$XOFF+4+$SZ*(($i+14)%16)`($sp)
	xor	$a0,$a0,$s0
	 srwi	$s0,$t2,$sigma1[0]
	xor	$a1,$a1,$s1
	 srwi	$s1,$t3,$sigma1[0]
	addc	$x0,$x0,$a0			; x[i]+=sigma0(x[i+1])
	 srwi	$a0,$t3,$sigma1[1]-32
	insrwi	$s0,$t3,$sigma1[0],0
	insrwi	$s1,$t2,$sigma1[0],0
	adde	$x1,$x1,$a1
	 srwi	$a1,$t2,$sigma1[1]-32

	insrwi	$a0,$t2,$sigma1[1]-32,0
	srwi	$t2,$t2,$sigma1[2]
	insrwi	$a1,$t3,$sigma1[1]-32,0
	insrwi	$t2,$t3,$sigma1[2],0
	xor	$s0,$s0,$a0
	 lwz	$a0,`$XOFF+0+$SZ*(($i+9)%16)`($sp)
	srwi	$t3,$t3,$sigma1[2]
	xor	$s1,$s1,$a1
	 lwz	$a1,`$XOFF+4+$SZ*(($i+9)%16)`($sp)
	xor	$s0,$s0,$t2
	 addc	$x0,$x0,$a0			; x[i]+=x[i+9]
	xor	$s1,$s1,$t3
	 adde	$x1,$x1,$a1
	addc	$x0,$x0,$s0			; x[i]+=sigma1(x[i+14])
	adde	$x1,$x1,$s1
___
	($t0,$t1,$x0,$x1) = ($x0,$x1,$t0,$t1);
	&ROUND_00_15_ppc32(@_);
}

$code.=<<___;
.align	4
Lsha2_block_private:
	lwz	$t1,0($inp)
	xor	$a2,@V[3],@V[5]		; B^C, magic seed
	lwz	$t0,4($inp)
	xor	$a3,@V[2],@V[4]
___
for($i=0;$i<16;$i++) {
	&ROUND_00_15_ppc32($i,@V);
	unshift(@V,pop(@V));	unshift(@V,pop(@V));
	($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
}
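# The ($a0,$a1,$a2,$a3) rotation above implements the classic Maj
# shortcut: a^b is computed one round ahead (see "a^b, b^c in next
# round" in ROUND_00_15_ppc32), so that Maj(a,b,c) = b ^ ((a^b)&(b^c))
# costs just one and plus one xor per 32-bit half per round.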
$code.=<<___;
	li	$a0,`$rounds/16-1`
	mtctr	$a0
.align	4
Lrounds:
	addi	$Tbl,$Tbl,`16*$SZ`
___
for(;$i<32;$i++) {
	&ROUND_16_xx_ppc32($i,@V);
	unshift(@V,pop(@V));	unshift(@V,pop(@V));
	($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
}
$code.=<<___;
	bdnz-	Lrounds

	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	$POP	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	subi	$Tbl,$Tbl,`($rounds-16)*$SZ`	; rewind Tbl

	lwz	$t0,0($ctx)
	lwz	$t1,4($ctx)
	lwz	$t2,8($ctx)
	lwz	$t3,12($ctx)
	lwz	$a0,16($ctx)
	lwz	$a1,20($ctx)
	lwz	$a2,24($ctx)
	addc	@V[1],@V[1],$t1
	lwz	$a3,28($ctx)
	adde	@V[0],@V[0],$t0
	lwz	$t0,32($ctx)
	addc	@V[3],@V[3],$t3
	lwz	$t1,36($ctx)
	adde	@V[2],@V[2],$t2
	lwz	$t2,40($ctx)
	addc	@V[5],@V[5],$a1
	lwz	$t3,44($ctx)
	adde	@V[4],@V[4],$a0
	lwz	$a0,48($ctx)
	addc	@V[7],@V[7],$a3
	lwz	$a1,52($ctx)
	adde	@V[6],@V[6],$a2
	lwz	$a2,56($ctx)
	addc	@V[9],@V[9],$t1
	lwz	$a3,60($ctx)
	adde	@V[8],@V[8],$t0
	stw	@V[0],0($ctx)
	stw	@V[1],4($ctx)
	addc	@V[11],@V[11],$t3
	stw	@V[2],8($ctx)
	stw	@V[3],12($ctx)
	adde	@V[10],@V[10],$t2
	stw	@V[4],16($ctx)
	stw	@V[5],20($ctx)
	addc	@V[13],@V[13],$a1
	stw	@V[6],24($ctx)
	stw	@V[7],28($ctx)
	adde	@V[12],@V[12],$a0
	stw	@V[8],32($ctx)
	stw	@V[9],36($ctx)
	addc	@V[15],@V[15],$a3
	stw	@V[10],40($ctx)
	stw	@V[11],44($ctx)
	adde	@V[14],@V[14],$a2
	stw	@V[12],48($ctx)
	stw	@V[13],52($ctx)
	stw	@V[14],56($ctx)
	stw	@V[15],60($ctx)

	addi	$inp,$inp,`16*$SZ`		; advance inp
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)
	$UCMP	$inp,$num
	bne	Lsha2_block_private
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
.size	$func,.-$func
___
}

# Ugly hack here, because PPC assembler syntax seems to vary too much
# from platform to platform...
$code.=<<___;
.align	6
LPICmeup:
	mflr	r0
	bcl	20,31,\$+4
	mflr	$Tbl	; vvvvvv "distance" between . and 1st data entry
	addi	$Tbl,$Tbl,`64-8`
	mtlr	r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
	.space	`64-9*4`
___
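# LPICmeup obtains the table address position-independently: bcl
# 20,31,$+4 is the "branch always and link" idiom that records the
# address of the next instruction in LR without disturbing the
# branch-prediction link stack; mflr reads it back, and the addi skips
# the 64-8 bytes between that mflr and the constant table (LPICmeup is
# 64-byte aligned and padded with .space so the table starts exactly
# 64 bytes in).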
$code.=<<___ if ($SZ==8);
	.long	0x428a2f98,0xd728ae22,0x71374491,0x23ef65cd
	.long	0xb5c0fbcf,0xec4d3b2f,0xe9b5dba5,0x8189dbbc
	.long	0x3956c25b,0xf348b538,0x59f111f1,0xb605d019
	.long	0x923f82a4,0xaf194f9b,0xab1c5ed5,0xda6d8118
	.long	0xd807aa98,0xa3030242,0x12835b01,0x45706fbe
	.long	0x243185be,0x4ee4b28c,0x550c7dc3,0xd5ffb4e2
	.long	0x72be5d74,0xf27b896f,0x80deb1fe,0x3b1696b1
	.long	0x9bdc06a7,0x25c71235,0xc19bf174,0xcf692694
	.long	0xe49b69c1,0x9ef14ad2,0xefbe4786,0x384f25e3
	.long	0x0fc19dc6,0x8b8cd5b5,0x240ca1cc,0x77ac9c65
	.long	0x2de92c6f,0x592b0275,0x4a7484aa,0x6ea6e483
	.long	0x5cb0a9dc,0xbd41fbd4,0x76f988da,0x831153b5
	.long	0x983e5152,0xee66dfab,0xa831c66d,0x2db43210
	.long	0xb00327c8,0x98fb213f,0xbf597fc7,0xbeef0ee4
	.long	0xc6e00bf3,0x3da88fc2,0xd5a79147,0x930aa725
	.long	0x06ca6351,0xe003826f,0x14292967,0x0a0e6e70
	.long	0x27b70a85,0x46d22ffc,0x2e1b2138,0x5c26c926
	.long	0x4d2c6dfc,0x5ac42aed,0x53380d13,0x9d95b3df
	.long	0x650a7354,0x8baf63de,0x766a0abb,0x3c77b2a8
	.long	0x81c2c92e,0x47edaee6,0x92722c85,0x1482353b
	.long	0xa2bfe8a1,0x4cf10364,0xa81a664b,0xbc423001
	.long	0xc24b8b70,0xd0f89791,0xc76c51a3,0x0654be30
	.long	0xd192e819,0xd6ef5218,0xd6990624,0x5565a910
	.long	0xf40e3585,0x5771202a,0x106aa070,0x32bbd1b8
	.long	0x19a4c116,0xb8d2d0c8,0x1e376c08,0x5141ab53
	.long	0x2748774c,0xdf8eeb99,0x34b0bcb5,0xe19b48a8
	.long	0x391c0cb3,0xc5c95a63,0x4ed8aa4a,0xe3418acb
	.long	0x5b9cca4f,0x7763e373,0x682e6ff3,0xd6b2b8a3
	.long	0x748f82ee,0x5defb2fc,0x78a5636f,0x43172f60
	.long	0x84c87814,0xa1f0ab72,0x8cc70208,0x1a6439ec
	.long	0x90befffa,0x23631e28,0xa4506ceb,0xde82bde9
	.long	0xbef9a3f7,0xb2c67915,0xc67178f2,0xe372532b
	.long	0xca273ece,0xea26619c,0xd186b8c7,0x21c0c207
	.long	0xeada7dd6,0xcde0eb1e,0xf57d4f7f,0xee6ed178
	.long	0x06f067aa,0x72176fba,0x0a637dc5,0xa2c898a6
	.long	0x113f9804,0xbef90dae,0x1b710b35,0x131c471b
	.long	0x28db77f5,0x23047d84,0x32caab7b,0x40c72493
	.long	0x3c9ebe0a,0x15c9bebc,0x431d67c4,0x9c100d4c
	.long	0x4cc5d4be,0xcb3e42b6,0x597f299c,0xfc657e2a
	.long	0x5fcb6fab,0x3ad6faec,0x6c44198c,0x4a475817
___
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT;