SHA1 for PowerPC.
[openssl.git] / crypto / sha / asm / sha1-ppc.pl
1 #!/usr/bin/env perl
2
3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. Rights for redistribution and usage in source and binary
6 # forms are granted according to the OpenSSL license.
7 # ====================================================================
8
9 # I let hardware handle unaligned input, except on page boundaries
10 # (see below for details). Otherwise straightforward implementation
# with X vector in register bank. The module is big-endian [which is
# no big deal, as there are no little-endian targets left around].
13
14 # gcc-4.0.0     -m64    -m32
15 # --------------------------
16 # sha1          +76%    +59%
17
# First command-line argument names the output file and selects the
# flavour: a name matching /64\.s/ yields 64-bit code, /32\.s/ 32-bit.
# Per-flavour settings: pointer size, red-zone size reserved below the
# stack pointer, and mnemonics for unsigned compare, store-with-update
# and pointer-sized load/store.
$output = shift;

if ($output =~ /64\.s/) {
	($SIZE_T,$RZONE)=(8,288);
	($UCMP,$STU,$POP,$PUSH)=("cmpld","stdu","ld","std");
} elsif ($output =~ /32\.s/) {
	($SIZE_T,$RZONE)=(4,224);
	($UCMP,$STU,$POP,$PUSH)=("cmplw","stwu","lwz","stw");
} else { die "nonsense $output"; }

# Unless a second argument suppresses it, pipe our output through the
# ppc-xlate.pl assembler translator.
unless (defined shift) {
	open STDOUT,"| $^X ../perlasm/ppc-xlate.pl $output"
		or die "can't call ../perlasm/ppc-xlate.pl: $!";
}

$FRAME=24*$SIZE_T;	# ABI linkage/parameter area of the stack frame
40
# Fixed register assignments for the generated code.
($K,  $sp, $toc) = ("r0","r1","r2");	# round constant, stack ptr, TOC (unused here)
($ctx,$inp,$num) = ("r3","r4","r5");	# arguments: SHA_CTX*, input ptr, block count
($t0, $t1)       = ("r15","r6");	# scratch temporaries

# Working variables of the compression function; $T receives each
# round's freshly computed value.
($A,$B,$C,$D,$E,$T) = map { "r$_" } (7..12);

@V=($A,$B,$C,$D,$E,$T);			# rotated one slot per round
@X=map { "r$_" } (16..31);		# 16-word message schedule, in registers
60
# Emit one round for i in 0..19, which use the Ch selection function
# Ch(b,c,d) = (b & c) | (~b & d) and round constant K = 0x5a827999
# (pre-loaded into $K by the caller).  Computes
#	$f = rotl($a,5) + Ch($b,$c,$d) + $e + K + W[i]
# and rotates $b left by 30 in place; the caller then rotates the
# six-register tuple so $f becomes the next round's "a".
sub BODY_00_19 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
# Round 0 is the only round that must load its own input word; every
# later round below 15 has it prefetched by the previous round.
$code.=<<___ if ($i==0);
	lwz	@X[$i],$i*4($inp)
___
# Rounds 0..14: round function interleaved with the load of W[i+1].
$code.=<<___ if ($i<15);
	lwz	@X[$j],$j*4($inp)
	add	$f,$K,$e
	rotlwi	$e,$a,5
	add	$f,$f,@X[$i]
	and	$t0,$c,$b
	add	$f,$f,$e
	andc	$t1,$d,$b
	rotlwi	$b,$b,30
	or	$t0,$t0,$t1
	add	$f,$f,$t0
___
# Rounds 15..19: all 16 input words are in, so interleave the message
# schedule update instead: W[j] = rotl(W[j-3]^W[j-8]^W[j-14]^W[j-16],1),
# kept in the 16-entry circular @X register file (indices mod 16, so
# j+2 == j-14, j+8 == j-8, j+13 == j-3 modulo 16).
$code.=<<___ if ($i>=15);
	add	$f,$K,$e
	rotlwi	$e,$a,5
	xor	@X[$j%16],@X[$j%16],@X[($j+2)%16]
	add	$f,$f,@X[$i%16]
	and	$t0,$c,$b
	xor	@X[$j%16],@X[$j%16],@X[($j+8)%16]
	add	$f,$f,$e
	andc	$t1,$d,$b
	rotlwi	$b,$b,30
	or	$t0,$t0,$t1
	xor	@X[$j%16],@X[$j%16],@X[($j+13)%16]
	add	$f,$f,$t0
	rotlwi	@X[$j%16],@X[$j%16],1
___
}
95
# Emit one round for i in 20..39 and 60..79, which both use the Parity
# function b^c^d; the caller pre-loads $K with 0x6ed9eba1 resp.
# 0xca62c1d6.  Message schedule update interleaved as in BODY_00_19.
sub BODY_20_39 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
# All rounds but the very last: round function plus W[] update.
$code.=<<___ if ($i<79);
	add	$f,$K,$e
	rotlwi	$e,$a,5
	xor	@X[$j%16],@X[$j%16],@X[($j+2)%16]
	add	$f,$f,@X[$i%16]
	xor	$t0,$b,$c
	xor	@X[$j%16],@X[$j%16],@X[($j+8)%16]
	add	$f,$f,$e
	rotlwi	$b,$b,30
	xor	$t0,$t0,$d
	xor	@X[$j%16],@X[$j%16],@X[($j+13)%16]
	add	$f,$f,$t0
	rotlwi	@X[$j%16],@X[$j%16],1
___
# Final round 79: no further schedule update is needed, so interleave
# reloading the five chaining words from the context into r16..r20
# instead; they are folded into the working variables right after the
# round loop (see Lsha1_block_private's tail).
$code.=<<___ if ($i==79);
	add	$f,$K,$e
	rotlwi	$e,$a,5
	lwz	r16,0($ctx)
	add	$f,$f,@X[$i%16]
	xor	$t0,$b,$c
	lwz	r17,4($ctx)
	add	$f,$f,$e
	rotlwi	$b,$b,30
	lwz	r18,8($ctx)
	xor	$t0,$t0,$d
	lwz	r19,12($ctx)
	add	$f,$f,$t0
	lwz	r20,16($ctx)
___
}
129
# Emit one round for i in 40..59, which use the majority function
# Maj(b,c,d), computed here as (b & c) | ((b | c) & d); the caller
# pre-loads $K with 0x8f1bbcdc.  Message schedule update interleaved
# as in the other round bodies.
sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
$code.=<<___;
	add	$f,$K,$e
	rotlwi	$e,$a,5
	xor	@X[$j%16],@X[$j%16],@X[($j+2)%16]
	add	$f,$f,@X[$i%16]
	and	$t0,$b,$c
	xor	@X[$j%16],@X[$j%16],@X[($j+8)%16]
	add	$f,$f,$e
	or	$t1,$b,$c
	rotlwi	$b,$b,30
	xor	@X[$j%16],@X[$j%16],@X[($j+13)%16]
	and	$t1,$t1,$d
	or	$t0,$t0,$t1
	rotlwi	@X[$j%16],@X[$j%16],1
	add	$f,$f,$t0
___
}
150
# Public entry point sha1_block_asm_data_order(SHA_CTX*, input, num):
# allocate a frame covering linkage area + 64-byte staging buffer +
# red zone, save lr and the non-volatile registers r15..r31 we use,
# load the five chaining words, then dispatch on input alignment —
# word-aligned input is hashed directly via Laligned, otherwise we
# take the page-boundary-safe Lunaligned path below.  Ldone is the
# common epilogue (restore registers, pop frame, return).
$code=<<___;
.text

.globl	.sha1_block_asm_data_order
.align	4
.sha1_block_asm_data_order:
	mflr	r0
	$STU	$sp,`-($FRAME+64+$RZONE)`($sp)
	$PUSH	r0,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	lwz	$A,0($ctx)
	lwz	$B,4($ctx)
	lwz	$C,8($ctx)
	lwz	$D,12($ctx)
	lwz	$E,16($ctx)
	andi.	r0,$inp,3
	bne	Lunaligned
Laligned:
	mtctr	$num
	bl	Lsha1_block_private
Ldone:
	$POP	r0,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,`$FRAME+64+$RZONE`
	blr
___
210
# PowerPC specification allows an implementation to be ill-behaved
# upon unaligned access which crosses page boundary. "Better safe
# than sorry" principle makes me treat it specially. But I don't
# look for particular offending word, but rather for 64-byte input
# block which crosses the boundary. Once found that block is aligned
# and hashed separately...
#
# Implementation notes: the boundary-crossing block is byte-copied
# into the 64-byte staging buffer at $sp+$FRAME (reserved by the
# enlarged frame) and hashed from there; the advanced input pointer
# is stashed in the frame slot at $FRAME-$SIZE_T*19, just below the
# saved-register area, across the Lsha1_block_private call.
$code.=<<___;
.align	4
Lunaligned:
	li	$t1,4096
	subf	$t1,$inp,$t1
	andi.	$t1,$t1,4095	; distance to closest page boundary
	srwi.	$t1,$t1,6	; t1/=64
	beq	Lcross_page
	$UCMP	$num,$t1
	ble-	Laligned	; didn't cross the page boundary
	mtctr	$t1
	subf	$num,$t1,$num
	bl	Lsha1_block_private
Lcross_page:
	li	$t1,16
	mtctr	$t1
	addi	r20,$sp,$FRAME	; spot below the frame
Lmemcpy:
	lbz	r16,0($inp)
	lbz	r17,1($inp)
	lbz	r18,2($inp)
	lbz	r19,3($inp)
	addi	$inp,$inp,4
	stb	r16,0(r20)
	stb	r17,1(r20)
	stb	r18,2(r20)
	stb	r19,3(r20)
	addi	r20,r20,4
	bdnz	Lmemcpy

	$PUSH	$inp,`$FRAME-$SIZE_T*19`($sp)
	li	$t1,1
	addi	$inp,$sp,$FRAME
	mtctr	$t1
	bl	Lsha1_block_private
	$POP	$inp,`$FRAME-$SIZE_T*19`($sp)
	addic.	$num,$num,-1
	bne-	Lunaligned
	b	Ldone
___
257
# This is private block function, which uses tailored calling
# interface, namely upon entry SHA_CTX is pre-loaded to given
# registers and counter register contains amount of chunks to
# digest...
$code.=<<___;
.align	4
Lsha1_block_private:
___
$code.=<<___;	# load K_00_19
	lis	$K,0x5a82
	ori	$K,$K,0x7999
___
# Unroll all 80 rounds, rotating the 6-register tuple @V one slot per
# round so each round body sees (a,b,c,d,e,scratch) in fresh registers.
for($i=0;$i<20;$i++)	{ &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;	# load K_20_39
	lis	$K,0x6ed9
	ori	$K,$K,0xeba1
___
for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;	# load K_40_59
	lis	$K,0x8f1b
	ori	$K,$K,0xbcdc
___
for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;	# load K_60_79
	lis	$K,0xca62
	ori	$K,$K,0xc1d6
___
for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
# After the 80 tuple rotations the final a..e values ended up in
# $E,$T,$A,$B,$C; add the previous chaining words (loaded into
# r16..r20 by round 79), store the updated state, and mirror it back
# into the canonical $A..$E registers for the next iteration.  bdnz
# loops while CTR (block count, set by the caller) is non-zero.
$code.=<<___;
	add	r16,r16,$E
	add	r17,r17,$T
	add	r18,r18,$A
	add	r19,r19,$B
	add	r20,r20,$C
	stw	r16,0($ctx)
	mr	$A,r16
	stw	r17,4($ctx)
	mr	$B,r17
	stw	r18,8($ctx)
	mr	$C,r18
	stw	r19,12($ctx)
	mr	$D,r19
	stw	r20,16($ctx)
	mr	$E,r20
	addi	$inp,$inp,`16*4`
	bdnz-	Lsha1_block_private
	blr
___
306
# Expand the `...` constructs (compile-time arithmetic such as frame
# offsets) by evaluating them as Perl, then emit the generated code.
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;

# STDOUT may be a pipe to ppc-xlate.pl; an unchecked close would
# silently discard buffered-write or translator failures, so verify it.
close STDOUT or die "error closing STDOUT: $!";