# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# SHA256 block procedure for ARMv4. May 2007.

# Performance is ~2x better than gcc 3.4 generated code and in "abso-
# lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
# byte [on single-issue Xscale PXA250 core].

# Rescheduling for dual-issue pipeline resulted in 22% improvement on
# Cortex A8 core and ~20 cycles per processed byte.

# Profiler-assisted and platform-specific optimization resulted in 16%
# improvement on Cortex A8 core and ~16.4 cycles per processed byte.
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
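# The first command-line argument that looks like a file name becomes the
# output file, and STDOUT is reopened to it so the generated assembly can
# simply be printed; an assumed invocation would be, e.g.,
# "perl sha256-armv4.pl sha256-armv4.S".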
@V=($A,$B,$C,$D,$E,$F,$G,$H);
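# @V holds the eight working variables a..h; the round generators below
# rotate it with unshift(@V,pop(@V)) so a single macro body can serve
# every round.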
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___ if ($i<16);
	@ ldr	$t1,[$inp],#4			@ $i
	str	$inp,[sp,#17*4]			@ make room for $t4
	mov	$t0,$e,ror#$Sigma1[0]
	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
	eor	$t0,$t0,$e,ror#$Sigma1[1]
	@ ldrb	$t1,[$inp,#3]			@ $i
	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
	orr	$t1,$t1,$t0,lsl#16
	str	$inp,[sp,#17*4]			@ make room for $t4
	mov	$t0,$e,ror#$Sigma1[0]
	orr	$t1,$t1,$t2,lsl#24
	eor	$t0,$t0,$e,ror#$Sigma1[1]
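	@ The ldrb/orr sequence above (pre-ARMv7 path) assembles the
	@ big-endian input word a byte at a time, so unaligned input is
	@ handled for free; the ARMv7 path loads the word whole and
	@ byte-reverses it instead.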
	ldr	$t2,[$Ktbl],#4			@ *K256++
	add	$h,$h,$t1			@ h+=X[i]
	str	$t1,[sp,#`$i%16`*4]
	eor	$t0,$t0,$e,ror#$Sigma1[2]	@ Sigma1(e)
	add	$h,$h,$t0			@ h+=Sigma1(e)
	eor	$t1,$t1,$g			@ Ch(e,f,g)
	add	$h,$h,$t2			@ h+=K256[i]
	mov	$t0,$a,ror#$Sigma0[0]
	add	$h,$h,$t1			@ h+=Ch(e,f,g)
	ldr	$t1,[$inp],#4			@ prefetch
	eor	$t2,$a,$b			@ a^b, b^c in next round
	ldr	$t1,[sp,#`($i+2)%16`*4]		@ from future BODY_16_xx
	eor	$t2,$a,$b			@ a^b, b^c in next round
	ldr	$t4,[sp,#`($i+15)%16`*4]	@ from future BODY_16_xx
	eor	$t0,$t0,$a,ror#$Sigma0[1]
	and	$t3,$t3,$t2			@ (b^c)&=(a^b)
	eor	$t0,$t0,$a,ror#$Sigma0[2]	@ Sigma0(a)
	eor	$t3,$t3,$b			@ Maj(a,b,c)
	add	$h,$h,$t0			@ h+=Sigma0(a)
	@ add	$h,$h,$t3			@ h+=Maj(a,b,c)
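	@ One round thus computes T1 = h + Sigma1(e) + Ch(e,f,g) + K256[i]
	@ + X[i], adds T1 into d, and sets h = T1 + Sigma0(a) + Maj(a,b,c);
	@ Maj is deliberately folded in one round late, which is why every
	@ body begins with "h+=Maj(a,b,c) from the past".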
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

	@ ldr	$t1,[sp,#`($i+1)%16`*4]		@ $i
	@ ldr	$t4,[sp,#`($i+14)%16`*4]
	mov	$t0,$t1,ror#$sigma0[0]
	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
	mov	$t2,$t4,ror#$sigma1[0]
	eor	$t0,$t0,$t1,ror#$sigma0[1]
	eor	$t2,$t2,$t4,ror#$sigma1[1]
	eor	$t0,$t0,$t1,lsr#$sigma0[2]	@ sigma0(X[i+1])
	ldr	$t1,[sp,#`($i+0)%16`*4]
	eor	$t2,$t2,$t4,lsr#$sigma1[2]	@ sigma1(X[i+14])
	ldr	$t4,[sp,#`($i+9)%16`*4]
	mov	$t0,$e,ror#$Sigma1[0]		@ from BODY_00_15
	eor	$t0,$t0,$e,ror#$Sigma1[1]	@ from BODY_00_15
	add	$t1,$t1,$t4			@ X[i]
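	@ Message schedule: X[i] = sigma0(X[i+1]) + sigma1(X[i+14])
	@ + X[i+9] + X[i+0] (indices mod 16, i.e. X[i-15], X[i-2], X[i-7]
	@ and X[i-16]), computed in place in the 16-word window on the
	@ stack before falling into the shared BODY_00_15 tail.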
#include "arm_arch.h"

.word	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
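@ K256 above is the table of 64 SHA-256 round constants: the first 32
@ bits of the fractional parts of the cube roots of the first 64 primes
@ (FIPS 180-4).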
.global	sha256_block_data_order
.type	sha256_block_data_order,%function
sha256_block_data_order:
	sub	r3,pc,#8		@ sha256_block_data_order
	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
	stmdb	sp!,{$ctx,$inp,$len,r4-r11,lr}
	ldmia	$ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
	sub	$Ktbl,r3,#256		@ K256
	sub	sp,sp,#16*4		@ alloca(X[16])
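	@ Frame layout: 16 words of X[] scratch at sp, with the saved ctx,
	@ inp and inp+len just above at sp+#16*4, sp+#17*4 and sp+#18*4
	@ (matching the "pull" loads below).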
	eor	$t3,$B,$C		@ magic
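	@ "magic": $t3 is seeded with b^c so each round can form
	@ Maj(a,b,c) as ((b^c)&(a^b))^b, cheaper than the textbook
	@ (a&b)^(a&c)^(b&c).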
for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
$code.=".Lrounds_16_xx:\n";
for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
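# Rounds 16..31 are emitted once after the .Lrounds_16_xx label and
# re-executed at run time to cover rounds 32..63, the final pass being
# detected via the low byte (0xf2) of the last K256 word.  What each
# generated round computes can be cross-checked against the following
# pure-Perl sketch; it is never called by this script, and the helper
# names are hypothetical, not part of the original module:
sub _ror32 { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff; }
sub _sha256_round_ref {
	my ($w,$k,$a,$b,$c,$d,$e,$f,$g,$h) = @_;	# X[i], K256[i], state a..h
	my $S1  = _ror32($e,6)^_ror32($e,11)^_ror32($e,25);	# Sigma1(e)
	my $ch  = ($e&$f)^((~$e&0xffffffff)&$g);		# Ch(e,f,g)
	my $S0  = _ror32($a,2)^_ror32($a,13)^_ror32($a,22);	# Sigma0(a)
	my $maj = (($b^$c)&($a^$b))^$b;		# Maj via the assembly's trick
	my $T1  = ($h+$S1+$ch+$k+$w)&0xffffffff;
	# return the rotated state (a..h) for the next round:
	# a=T1+Sigma0(a)+Maj, e=d+T1, everything else shifts down one slot
	return (($T1+$S0+$maj)&0xffffffff,$a,$b,$c,($d+$T1)&0xffffffff,$e,$f,$g);
}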
	ldreq	$t3,[sp,#16*4]		@ pull ctx
	add	$A,$A,$t2		@ h+=Maj(a,b,c) from the past
	ldr	$inp,[sp,#17*4]		@ pull inp
	ldr	$t2,[sp,#18*4]		@ pull inp+len
	stmia	$t3,{$A,$B,$C,$D,$E,$F,$G,$H}
	sub	$Ktbl,$Ktbl,#256	@ rewind Ktbl
	add	sp,sp,#`16+3`*4		@ destroy frame
	ldmia	sp!,{r4-r11,pc}
	ldmia	sp!,{r4-r11,lr}
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	sha256_block_data_order,.-sha256_block_data_order
.asciz	"SHA256 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
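# The two substitutions above run at generation time: the first evaluates
# the backtick-quoted arithmetic embedded in the assembly text (e.g.
# `$i%16`), and the second replaces "bx lr" with its literal instruction
# encoding (.word 0xe12fff1e) so the output still assembles as ARMv4,
# where the assembler rejects the mnemonic.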
close STDOUT;			# enforce flush