3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # sha1_block procedure for ARMv4.
14 # Size/performance trade-off
15 # ====================================================================
16 # impl size in bytes comp cycles[*] measured performance
17 # ====================================================================
19 # armv4-small 392/+29% 1958/+64% 2250/+96%
20 # armv4-compact 740/+89% 1552/+26% 1840/+22%
21 # armv4-large 1420/+92% 1307/+19% 1370/+34%[***]
22 # full unroll ~5100/+260% ~1260/+4% ~1300/+5%
23 # ====================================================================
24 # thumb = same as 'small' but in Thumb instructions[**] and
25 # with recurring code in two private functions;
26 # small = detached Xload/update, loops are folded;
27 # compact = detached Xload/update, 5x unroll;
28 # large = interleaved Xload/update, 5x unroll;
29 # full unroll = interleaved Xload/update, full unroll, estimated[!];
31 # [*] Manually counted instructions in "grand" loop body. Measured
32 # performance is affected by prologue and epilogue overhead,
33 # i-cache availability, branch penalties, etc.
# [**]	While each Thumb instruction is half the size, they are not as
35 # diverse as ARM ones: e.g., there are only two arithmetic
36 # instructions with 3 arguments, no [fixed] rotate, addressing
#	modes are limited. As a result it takes more instructions to do
38 # the same job in Thumb, therefore the code is never twice as
39 # small and always slower.
40 # [***] which is also ~35% better than compiler generated code. Dual-
41 # issue Cortex A8 core was measured to process input block in
# Scan @ARGV for the first argument that looks like a plain output
# filename (word char, then word/hyphen chars, a dot, an extension);
# leading flags that don't match are discarded.  All generated code is
# emitted via STDOUT, so redirect it to that file.
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
# Three-arg open avoids mode injection via the filename and the result
# is checked so a bad path fails loudly instead of silently discarding
# the generated assembly.
open STDOUT,">",$output or die "can't open $output: $!";
63 # One can optimize this for aligned access on big-endian architecture,
64 # but code's endian neutrality makes it too pretty:-)
66 my ($a,$b,$c,$d,$e)=@_;
72 add $e,$K,$e,ror#2 @ E+=K_00_19
74 add $e,$e,$a,ror#27 @ E+=ROR(A,27)
76 eor $t1,$c,$d @ F_xx_xx
78 add $e,$e,$t0 @ E+=X[i]
83 my ($a,$b,$c,$d,$e,$flag)=@_;
89 add $e,$K,$e,ror#2 @ E+=K_xx_xx
93 add $e,$e,$a,ror#27 @ E+=ROR(A,27)
95 $code.=<<___ if (!defined($flag));
96 eor $t1,$c,$d @ F_xx_xx, but not in 40_59
100 add $e,$e,$t0 @ E+=X[i]
106 my ($a,$b,$c,$d,$e)=@_;
110 eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
111 add $e,$e,$t1 @ E+=F_00_19(B,C,D)
116 my ($a,$b,$c,$d,$e)=@_;
120 eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
121 add $e,$e,$t1 @ E+=F_00_19(B,C,D)
126 my ($a,$b,$c,$d,$e)=@_;
129 eor $t1,$b,$t1,ror#2 @ F_20_39(B,C,D)
130 add $e,$e,$t1 @ E+=F_20_39(B,C,D)
135 my ($a,$b,$c,$d,$e)=@_;
142 add $e,$e,$t1 @ E+=F_40_59(B,C,D)
150 orr $t1,$t1,$t2 @ F_40_59(B,C,D)
151 add $e,$e,$t1 @ E+=F_40_59(B,C,D)
159 .global sha1_block_data_order
160 .type sha1_block_data_order,%function
163 sha1_block_data_order:
164 stmdb sp!,{r4-r12,lr}
165 add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp
166 ldmia $ctx,{$a,$b,$c,$d,$e}
173 mov $e,$e,ror#30 @ [6]
176 for($i=0;$i<5;$i++) {
177 &BODY_00_15(@V); unshift(@V,pop(@V));
181 bne .L_00_15 @ [((11+4)*5+2)*3]
183 &BODY_00_15(@V); unshift(@V,pop(@V));
184 &BODY_16_19(@V); unshift(@V,pop(@V));
185 &BODY_16_19(@V); unshift(@V,pop(@V));
186 &BODY_16_19(@V); unshift(@V,pop(@V));
187 &BODY_16_19(@V); unshift(@V,pop(@V));
190 ldr $K,.LK_20_39 @ [+15+16*4]
192 cmn sp,#0 @ [+3], clear carry to denote 20_39
195 for($i=0;$i<5;$i++) {
196 &BODY_20_39(@V); unshift(@V,pop(@V));
199 teq $Xi,sp @ preserve carry
200 bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
201 bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
204 sub sp,sp,#20*4 @ [+2]
207 for($i=0;$i<5;$i++) {
208 &BODY_40_59(@V); unshift(@V,pop(@V));
212 bne .L_40_59 @ [+((12+5)*5+2)*4]
216 cmp sp,#0 @ set carry to denote 60_79
217 b .L_20_39_or_60_79 @ [+4], spare 300 bytes
219 add sp,sp,#80*4 @ "deallocate" stack frame
220 ldmia $ctx,{$K,$t0,$t1,$t2,$t3}
226 stmia $ctx,{$a,$b,$c,$d,$e}
228 bne .Lloop @ [+18], total 1307
230 ldmia sp!,{r4-r12,lr}
232 moveq pc,lr @ be binary compatible with V4, yet
233 bx lr @ interoperable with Thumb ISA:-)
235 .LK_00_19: .word 0x5a827999
236 .LK_20_39: .word 0x6ed9eba1
237 .LK_40_59: .word 0x8f1bbcdc
238 .LK_60_79: .word 0xca62c1d6
239 .size sha1_block_data_order,.-sha1_block_data_order
240 .asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
# Replace every "bx lr" with its literal instruction encoding
# (0xe12fff1e) so the output still assembles with -march=armv4,
# whose assembler rejects the BX mnemonic; the CPU executes the
# word identically on architectures that have BX.
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
# STDOUT is a write handle redirected to the output file: buffered
# write errors (e.g. disk full) only surface at close, so the close
# must be checked or a truncated file would go unnoticed.
close STDOUT or die "error closing STDOUT: $!"; # enforce flush