# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
-# sha1_block precedure for ARMv4.
+# sha1_block procedure for ARMv4.
#
# January 2007.
# impl size in bytes comp cycles[*] measured performance
# ====================================================================
# thumb 304 3212 4420
-# armv4-small 392/+29% 1958/+64% 2290/+93%
-# armv4-compact 740/+89% 1552/+26% 1910/+20%
-# armv4-large 1420/+92% 1307/+19% 1630/+17%
-# full unroll ~5100/+260% ~1260/+4% ~1600/+2%
+# armv4-small 392/+29% 1958/+64% 2250/+96%
+# armv4-compact 740/+89% 1552/+26% 1840/+22%
+# armv4-large 1420/+92% 1307/+19% 1370/+34%[***]
+# full unroll ~5100/+260% ~1260/+4% ~1300/+5%
# ====================================================================
# thumb = same as 'small' but in Thumb instructions[**] and
# with recurring code in two private functions;
# modes are limited. As result it takes more instructions to do
# the same job in Thumb, therefore the code is never twice as
# small and always slower.
+# [***] which is also ~35% better than compiler generated code. Dual-
+# issue Cortex A8 core was measured to process input block in
+# ~990 cycles.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}	# skip leading flag-style args; first filename-looking arg becomes $output
+open STDOUT,">$output";	# NOTE(review): 2-arg open, unchecked — upstream cryptogams style; 3-arg `open STDOUT,">",$output or die` would be safer
$ctx="r0";
$inp="r1";
$d="r6";
$e="r7";
$K="r8";
-$t0="r10";
-$t1="r11";
-$t2="r12";
+$t0="r9";	# temporaries renumbered down to r9-r11 ...
+$t1="r10";
+$t2="r11";
+$t3="r12";	# ... to free r12 as a fourth temp: lets Xload/Xupdate issue all four loads up front
$Xi="r14";
@V=($a,$b,$c,$d,$e);
ldrb $t0,[$inp],#4
ldrb $t1,[$inp,#-3]
ldrb $t2,[$inp,#-2]
+ ldrb $t3,[$inp,#-1]
add $e,$K,$e,ror#2 @ E+=K_00_19
orr $t0,$t1,$t0,lsl#8
- ldrb $t1,[$inp,#-1]
add $e,$e,$a,ror#27 @ E+=ROR(A,27)
orr $t0,$t2,$t0,lsl#8
- orr $t0,$t1,$t0,lsl#8
+ eor $t1,$c,$d @ F_xx_xx
+ orr $t0,$t3,$t0,lsl#8
add $e,$e,$t0 @ E+=X[i]
str $t0,[$Xi,#-4]!
___
}
sub Xupdate {
-my ($a,$b,$c,$d,$e)=@_;
+my ($a,$b,$c,$d,$e,$flag)=@_;
$code.=<<___;
ldr $t0,[$Xi,#15*4]
ldr $t1,[$Xi,#13*4]
ldr $t2,[$Xi,#7*4]
+ ldr $t3,[$Xi,#2*4]
add $e,$K,$e,ror#2 @ E+=K_xx_xx
eor $t0,$t0,$t1
- ldr $t1,[$Xi,#2*4]
- add $e,$e,$a,ror#27 @ E+=ROR(A,27)
+ eor $t2,$t2,$t3
eor $t0,$t0,$t2
- eor $t0,$t0,$t1
+ add $e,$e,$a,ror#27 @ E+=ROR(A,27)
+___
+$code.=<<___ if (!defined($flag));
+ eor $t1,$c,$d @ F_xx_xx, but not in 40_59
+___
+$code.=<<___;
mov $t0,$t0,ror#31
add $e,$e,$t0 @ E+=X[i]
str $t0,[$Xi,#-4]!
my ($a,$b,$c,$d,$e)=@_;
&Xload(@_);
$code.=<<___;
- eor $t1,$c,$d
and $t1,$b,$t1,ror#2
eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
add $e,$e,$t1 @ E+=F_00_19(B,C,D)
my ($a,$b,$c,$d,$e)=@_;
&Xupdate(@_);
$code.=<<___;
- eor $t1,$c,$d
and $t1,$b,$t1,ror#2
eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
add $e,$e,$t1 @ E+=F_00_19(B,C,D)
my ($a,$b,$c,$d,$e)=@_;
&Xupdate(@_);
$code.=<<___;
- eor $t1,$c,$d
eor $t1,$b,$t1,ror#2 @ F_20_39(B,C,D)
add $e,$e,$t1 @ E+=F_20_39(B,C,D)
___
sub BODY_40_59 {
my ($a,$b,$c,$d,$e)=@_;
+if (1) {	# Maj(B,C,D) as (B&(C^D))+(C&D): the two terms are bit-disjoint, so ADD substitutes for ORR
&Xupdate(@_);
+$code.=<<___;
+	and	$t2,$c,$d		@ $t2 = C&D
+	and	$t1,$b,$t1,ror#2	@ $t1 = B&(C^D); C^D was precomputed into $t1 by Xupdate
+	add	$e,$e,$t2,ror#2		@ fold C&D into E (ror#2 — NOTE(review): matches deferred rotation of c/d, confirm vs Xload)
+	add	$e,$e,$t1	@ E+=F_40_59(B,C,D)
+___
+} else {	# disabled alternative: classic (B&C)|((B|C)&D) formulation
+	&Xupdate(@_,1);	# flag=1: tell Xupdate to skip the C^D precompute (see $flag branch in Xupdate)
$code.=<<___;
	and	$t1,$b,$c,ror#2
	orr	$t2,$b,$c,ror#2
	add	$e,$e,$t1	@ E+=F_40_59(B,C,D)
___
}
+}
$code=<<___;
.text
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
.L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
- ldmia $ctx,{$K,$t0,$t1,$t2,$Xi}
+ ldmia $ctx,{$K,$t0,$t1,$t2,$t3}
add $a,$K,$a
add $b,$t0,$b
add $c,$t1,$c,ror#2
add $d,$t2,$d,ror#2
- add $e,$Xi,$e,ror#2
+ add $e,$t3,$e,ror#2
stmia $ctx,{$a,$b,$c,$d,$e}
teq $inp,$len
bne .Lloop @ [+18], total 1307
.LK_60_79: .word 0xca62c1d6
.size sha1_block_data_order,.-sha1_block_data_order
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
___
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
print $code;
+close STDOUT; # enforce flush