704887b7abf46d3bf3f4f647114387ee5a58284c
[openssl.git] / crypto / sha / asm / sha1-ppc.pl
1 #!/usr/bin/env perl
2
3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. Rights for redistribution and usage in source and binary
6 # forms are granted according to the OpenSSL license.
7 # ====================================================================
8
9 # I let hardware handle unaligned input(*), except on page boundaries
10 # (see below for details). Otherwise straightforward implementation
11 # with X vector in register bank. The module is big-endian [which is
12 # not big deal as there're no little-endian targets left around].
13 #
14 # (*) this means that this module is inappropriate for PPC403? Does
15 #     anybody know if pre-POWER3 can sustain unaligned load?
16
17 #                       -m64    -m32
18 # ----------------------------------
19 # PPC970,gcc-4.0.0      +76%    +59%
20
# First argument is the output file name; its suffix selects the ABI:
# *64.s emits 64-bit code, *32.s 32-bit code.  The chosen pointer size
# drives the mnemonics used for stack bookkeeping throughout.
$output = shift;

if ($output =~ /64\.s/) {
	($SIZE_T,$UCMP,$STU,$POP,$PUSH)=(8,"cmpld","stdu","ld","std");
} elsif ($output =~ /32\.s/) {
	($SIZE_T,$UCMP,$STU,$POP,$PUSH)=(4,"cmplw","stwu","lwz","stw");
} else { die "nonsense $output"; }

# Unless a second argument is present (which suppresses it), route our
# output through the perlasm post-processor.
( defined shift || open STDOUT,"| $^X ../perlasm/ppc-xlate.pl $output" ) ||
	die "can't call ../perlasm/ppc-xlate.pl: $!";
# Stack frame size and symbolic register assignments.  r0 carries the
# round constant, r3..r5 the C-level arguments (ctx,inp,num), r7..r12
# the six-deep working-variable pipeline, and r16..r31 the 16-word
# message schedule; r15/r6 serve as scratch.
$FRAME=24*$SIZE_T;

($K, $sp, $toc, $ctx, $inp, $num, $t0, $t1)=
	("r0","r1","r2","r3","r4","r5","r15","r6");

($A,$B,$C,$D,$E,$T)=map("r$_",7..12);

@V=($A,$B,$C,$D,$E,$T);
@X=map("r$_",16..31);
61
# Emit one SHA-1 round for 0 <= $i <= 19, boolean function
# Ch(b,c,d) = (b & c) | (~b & d), with $K pre-loaded with 0x5a827999.
# @_ carries the round index followed by the caller's current
# permutation of the six working registers; $f receives
# rotl(a,5) + Ch(b,c,d) + e + K + X[i], and $b is rotated left by 30
# in place.  Clobbering $e with rotl(a,5) as a temporary is safe: that
# register is fully recomputed as the output two rounds later.
sub BODY_00_19 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
# Round 0 has no earlier round to have pre-loaded its input word.
$code.=<<___ if ($i==0);
	lwz	@X[$i],`$i*4`($inp)
___
# Rounds 0..14: interleave the load of the NEXT input word with this
# round's arithmetic to hide load latency.
$code.=<<___ if ($i<15);
	lwz	@X[$j],`$j*4`($inp)
	add	$f,$K,$e
	rotlwi	$e,$a,5
	add	$f,$f,@X[$i]
	and	$t0,$c,$b
	add	$f,$f,$e
	andc	$t1,$d,$b
	rotlwi	$b,$b,30
	or	$t0,$t0,$t1
	add	$f,$f,$t0
___
# Rounds 15..19: no more loads; instead start the schedule expansion
# X[j] = rotl(X[j-16] ^ X[j-14] ^ X[j-8] ^ X[j-3], 1) in the 16-word
# circular buffer @X — hence the %16 indexing, where j+2, j+8 and j+13
# are j-14, j-8 and j-3 modulo 16.
$code.=<<___ if ($i>=15);
	add	$f,$K,$e
	rotlwi	$e,$a,5
	xor	@X[$j%16],@X[$j%16],@X[($j+2)%16]
	add	$f,$f,@X[$i%16]
	and	$t0,$c,$b
	xor	@X[$j%16],@X[$j%16],@X[($j+8)%16]
	add	$f,$f,$e
	andc	$t1,$d,$b
	rotlwi	$b,$b,30
	or	$t0,$t0,$t1
	xor	@X[$j%16],@X[$j%16],@X[($j+13)%16]
	add	$f,$f,$t0
	rotlwi	@X[$j%16],@X[$j%16],1
___
}
96
# Emit one "parity" round: F(b,c,d) = b ^ c ^ d.  Used both for rounds
# 20..39 (K = 0x6ed9eba1) and, re-invoked by the driver below, for
# rounds 60..79 (K = 0xca62c1d6) — the body only differs in $K, which
# the caller loads.  $f receives rotl(a,5) + (b^c^d) + e + K + X[i].
sub BODY_20_39 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
$code.=<<___ if ($i<79);
	add	$f,$K,$e
	rotlwi	$e,$a,5
	xor	@X[$j%16],@X[$j%16],@X[($j+2)%16]
	add	$f,$f,@X[$i%16]
	xor	$t0,$b,$c
	xor	@X[$j%16],@X[$j%16],@X[($j+8)%16]
	add	$f,$f,$e
	rotlwi	$b,$b,30
	xor	$t0,$t0,$d
	xor	@X[$j%16],@X[$j%16],@X[($j+13)%16]
	add	$f,$f,$t0
	rotlwi	@X[$j%16],@X[$j%16],1
___
# The very last round needs no further schedule expansion; instead the
# five chaining words are pre-loaded from the context into r16..r20
# (schedule registers, dead by now), ready for the accumulation done at
# the tail of Lsha1_block_private.
$code.=<<___ if ($i==79);
	add	$f,$K,$e
	rotlwi	$e,$a,5
	lwz	r16,0($ctx)
	add	$f,$f,@X[$i%16]
	xor	$t0,$b,$c
	lwz	r17,4($ctx)
	add	$f,$f,$e
	rotlwi	$b,$b,30
	lwz	r18,8($ctx)
	xor	$t0,$t0,$d
	lwz	r19,12($ctx)
	add	$f,$f,$t0
	lwz	r20,16($ctx)
___
}
130
# Emit one round for 40 <= $i <= 59: F is the majority function,
# computed here as Maj(b,c,d) = (b & c) | (d & (b | c)), with
# $K = 0x8f1bbcdc.  Schedule expansion is interleaved exactly as in
# the other round bodies.
sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
$code.=<<___;
	add	$f,$K,$e
	rotlwi	$e,$a,5
	xor	@X[$j%16],@X[$j%16],@X[($j+2)%16]
	add	$f,$f,@X[$i%16]
	and	$t0,$b,$c
	xor	@X[$j%16],@X[$j%16],@X[($j+8)%16]
	add	$f,$f,$e
	or	$t1,$b,$c
	rotlwi	$b,$b,30
	xor	@X[$j%16],@X[$j%16],@X[($j+13)%16]
	and	$t1,$t1,$d
	or	$t0,$t0,$t1
	rotlwi	@X[$j%16],@X[$j%16],1
	add	$f,$f,$t0
___
}
151
# Public entry point sha1_block_asm_data_order(ctx,inp,num): extend the
# stack by $FRAME+64 bytes (the extra 64 are scratch used by the
# page-crossing copy below), save lr and the non-volatile registers
# r15..r31, load the five chaining words into $A..$E, then dispatch on
# the low bits of $inp — unaligned input takes the careful path.
# Ldone restores everything and returns.  (Comments cannot go inside
# the heredoc: its text is the emitted assembly.)
$code=<<___;
.text

.globl	.sha1_block_asm_data_order
.align	4
.sha1_block_asm_data_order:
	mflr	r0
	$STU	$sp,`-($FRAME+64)`($sp)
	$PUSH	r0,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	lwz	$A,0($ctx)
	lwz	$B,4($ctx)
	lwz	$C,8($ctx)
	lwz	$D,12($ctx)
	lwz	$E,16($ctx)
	andi.	r0,$inp,3
	bne	Lunaligned
Laligned:
	mtctr	$num
	bl	Lsha1_block_private
Ldone:
	$POP	r0,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,`$FRAME+64`
	blr
___
211
212 # PowerPC specification allows an implementation to be ill-behaved
213 # upon unaligned access which crosses page boundary. "Better safe
214 # than sorry" principle makes me treat it specially. But I don't
215 # look for particular offending word, but rather for 64-byte input
216 # block which crosses the boundary. Once found that block is aligned
217 # and hashed separately...
# Unaligned-input path (see the comment above): hash whole 64-byte
# blocks in place up to the next 4096-byte page boundary, then copy the
# boundary-crossing block byte-by-byte into the 64-byte scratch area
# just above the frame and hash it from there; $inp is saved/restored
# around that detour.  Loops back to Lunaligned until $num is spent.
$code.=<<___;
.align	4
Lunaligned:
	subfic	$t1,$inp,4096
	andi.	$t1,$t1,4095	; distance to closest page boundary
	srwi.	$t1,$t1,6	; t1/=64
	beq	Lcross_page
	$UCMP	$num,$t1
	ble-	Laligned	; didn't cross the page boundary
	mtctr	$t1
	subfc	$num,$t1,$num
	bl	Lsha1_block_private
Lcross_page:
	li	$t1,16
	mtctr	$t1
	addi	r20,$sp,$FRAME	; spot below the frame
Lmemcpy:
	lbz	r16,0($inp)
	lbz	r17,1($inp)
	lbz	r18,2($inp)
	lbz	r19,3($inp)
	addi	$inp,$inp,4
	stb	r16,0(r20)
	stb	r17,1(r20)
	stb	r18,2(r20)
	stb	r19,3(r20)
	addi	r20,r20,4
	bdnz	Lmemcpy

	$PUSH	$inp,`$FRAME-$SIZE_T*19`($sp)
	li	$t1,1
	addi	$inp,$sp,$FRAME
	mtctr	$t1
	bl	Lsha1_block_private
	$POP	$inp,`$FRAME-$SIZE_T*19`($sp)
	addic.	$num,$num,-1
	bne-	Lunaligned
	b	Ldone
___
257
258 # This is private block function, which uses tailored calling
259 # interface, namely upon entry SHA_CTX is pre-loaded to given
260 # registers and counter register contains amount of chunks to
261 # digest...
# Generate Lsha1_block_private: 80 fully unrolled rounds.  Rather than
# shuffling values between registers, each round's output register is
# chosen by rotating @V (unshift(@V,pop(@V))), so the working variables
# migrate through six registers with no "mr" instructions.  After 80
# rounds (80 mod 6 == 2) the final a..e live in $E,$T,$A,$B,$C — which
# is exactly what the tail adds into r16..r20, pre-loaded from the
# context by round 79 in BODY_20_39.
$code.=<<___;
.align	4
Lsha1_block_private:
___
$code.=<<___;	# load K_00_19
	lis	$K,0x5a82
	ori	$K,$K,0x7999
___
for($i=0;$i<20;$i++)	{ &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;	# load K_20_39
	lis	$K,0x6ed9
	ori	$K,$K,0xeba1
___
for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;	# load K_40_59
	lis	$K,0x8f1b
	ori	$K,$K,0xbcdc
___
for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;	# load K_60_79
	lis	$K,0xca62
	ori	$K,$K,0xc1d6
___
# Rounds 60..79 reuse the parity body; only $K differs.
for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
# Tail: fold the new values into the chaining words, store them, copy
# them back into $A..$E for the next chunk, advance $inp by one 64-byte
# block and loop on ctr.
$code.=<<___;
	add	r16,r16,$E
	add	r17,r17,$T
	add	r18,r18,$A
	add	r19,r19,$B
	add	r20,r20,$C
	stw	r16,0($ctx)
	mr	$A,r16
	stw	r17,4($ctx)
	mr	$B,r17
	stw	r18,8($ctx)
	mr	$C,r18
	stw	r19,12($ctx)
	mr	$D,r19
	stw	r20,16($ctx)
	mr	$E,r20
	addi	$inp,$inp,`16*4`
	bdnz-	Lsha1_block_private
	blr
___
306
# Expand the `...` constructs (compile-time arithmetic such as stack
# offsets) by evaluating each one as Perl, then emit the finished code.
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
# STDOUT is usually a pipe into ppc-xlate.pl; an unchecked close would
# silently discard both buffered-write errors and a failing
# post-processor, so propagate any failure.
close STDOUT or die "error closing STDOUT: $!";