Modest improvement coefficients mean that the code already had some
parallelism, so there was not much room left for improvement. Special
thanks to Ted Krovetz for benchmarking the code with such patience.
# allows a logical or arithmetic operation to be merged with a shift or
# rotate in one instruction and the combined result to be emitted every
# cycle. The module is endian-neutral. The performance is ~42 cycles/byte
# for 128-bit
+# key [on single-issue Xscale PXA250 core].
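#
# (Editorial sketch, not part of the module.) The "merge with shift or
# rotate" remark above refers to ARM's barrel shifter: the second operand
# of any data-processing instruction can be shifted or rotated for free,
# and register-offset loads can scale the index, so with arbitrary
# register names a table lookup folds into the following xor as
#
#	ldr	r4,[r2,r3,lsl#2]	@ load Te[i], index scaled in the load
#	eor	r0,r0,r4,ror#8		@ xor with rotate in one instruction
#
# rather than taking separate shift and xor instructions.
#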
# May 2007.
#
# AES_set_[en|de]crypt_key is added.
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 12% improvement on
+# Cortex A8 core and ~25 cycles per byte processed with 128-bit key.
+
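# (Editorial sketch, not part of the module.) The rescheduling in this
# change mostly hoists independent instructions into load latency slots,
# which a dual-issue in-order core such as Cortex A8 can fill instead of
# stalling; with arbitrary register names the pattern is roughly
#
#	ldr	r4,[r5],#4		@ result not ready on the next cycle
#	and	r6,r7,r0,lsr#8		@ independent work fills the slot
#	eor	r0,r0,r4		@ consumer no longer stalls
#
# instead of issuing the ldr and the dependent eor back to back.
#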
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
ldrb $t2,[$rounds,#1]
ldrb $t3,[$rounds,#0]
orr $s0,$s0,$t1,lsl#8
- orr $s0,$s0,$t2,lsl#16
- orr $s0,$s0,$t3,lsl#24
ldrb $t2,[$rounds,#5]
ldrb $t3,[$rounds,#4]
orr $s1,$s1,$t1,lsl#8
- orr $s1,$s1,$t2,lsl#16
- orr $s1,$s1,$t3,lsl#24
ldrb $t2,[$rounds,#9]
ldrb $t3,[$rounds,#8]
orr $s2,$s2,$t1,lsl#8
- orr $s2,$s2,$t2,lsl#16
- orr $s2,$s2,$t3,lsl#24
ldrb $t2,[$rounds,#13]
ldrb $t3,[$rounds,#12]
orr $s3,$s3,$t1,lsl#8
mov $t3,$s0,lsr#8
strb $t1,[$rounds,#0]
strb $t2,[$rounds,#1]
- strb $t3,[$rounds,#2]
- strb $s0,[$rounds,#3]
mov $t3,$s1,lsr#8
strb $t1,[$rounds,#4]
strb $t2,[$rounds,#5]
- strb $t3,[$rounds,#6]
- strb $s1,[$rounds,#7]
mov $t3,$s2,lsr#8
strb $t1,[$rounds,#8]
strb $t2,[$rounds,#9]
- strb $t3,[$rounds,#10]
- strb $s2,[$rounds,#11]
mov $t3,$s3,lsr#8
strb $t1,[$rounds,#12]
strb $t2,[$rounds,#13]
.align 2
_armv4_AES_encrypt:
str lr,[sp,#-4]! @ push lr
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- ldr $rounds,[$key,#240-16]
+ ldr $rounds,[$key,#240-16]
eor $s1,$s1,$t2
eor $s2,$s2,$t3
eor $s3,$s3,$i1
sub $rounds,$rounds,#1
mov lr,#255
and $i2,lr,$s0,lsr#8
and $i3,lr,$s0,lsr#16
ldr $t1,[$tbl,$i1,lsl#2] @ Te3[s0>>0]
- ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
- ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8]
- ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16]
-
and $i1,lr,$s1,lsr#16 @ i0
+ ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8]
+ ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16]
+ ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
ldr $i1,[$tbl,$i1,lsl#2] @ Te1[s1>>16]
- ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
ldr $i2,[$tbl,$i2,lsl#2] @ Te3[s1>>0]
ldr $i3,[$tbl,$i3,lsl#2] @ Te2[s1>>8]
eor $s0,$s0,$i1,ror#8
- eor $s1,$s1,$t1,ror#24
- eor $t2,$t2,$i2,ror#8
- eor $t3,$t3,$i3,ror#8
-
+ ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
and $i1,lr,$s2,lsr#8 @ i0
and $i2,lr,$s2,lsr#16 @ i1
ldr $i1,[$tbl,$i1,lsl#2] @ Te2[s2>>8]
ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16]
- ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
ldr $i3,[$tbl,$i3,lsl#2] @ Te3[s2>>0]
eor $s0,$s0,$i1,ror#16
- eor $s1,$s1,$i2,ror#8
- eor $s2,$s2,$t2,ror#16
- eor $t3,$t3,$i3,ror#16
-
+ ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
and $i2,lr,$s3,lsr#8 @ i1
and $i3,lr,$s3,lsr#16 @ i2
ldr $i1,[$tbl,$i1,lsl#2] @ Te3[s3>>0]
ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8]
ldr $i3,[$tbl,$i3,lsl#2] @ Te1[s3>>16]
- ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
+ ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ ldr $t2,[$key,#-8]
+ eor $s0,$s0,$i1
+ ldr $t3,[$key,#-4]
+ and $i1,lr,$s0
+ eor $s1,$s1,$t1
+ and $i2,lr,$s0,lsr#8
+ eor $s2,$s2,$t2
+ and $i3,lr,$s0,lsr#16
+ eor $s3,$s3,$t3
+ mov $s0,$s0,lsr#24
subs $rounds,$rounds,#1
bne .Lenc_loop
add $tbl,$tbl,#2
- and $i1,lr,$s0
- and $i2,lr,$s0,lsr#8
- and $i3,lr,$s0,lsr#16
- mov $s0,$s0,lsr#24
ldrb $t1,[$tbl,$i1,lsl#2] @ Te4[s0>>0]
- ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
- ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8]
- ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16]
-
and $i1,lr,$s1,lsr#16 @ i0
+ ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8]
+ ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16]
+ ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s1>>16]
- ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s1>>0]
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s1>>8]
eor $s0,$i1,$s0,lsl#8
- eor $s1,$t1,$s1,lsl#24
- eor $t2,$i2,$t2,lsl#8
- eor $t3,$i3,$t3,lsl#8
-
+ ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
and $i1,lr,$s2,lsr#8 @ i0
and $i2,lr,$s2,lsr#16 @ i1
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s2>>8]
ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16]
- ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s2>>0]
eor $s0,$i1,$s0,lsl#8
- eor $s1,$s1,$i2,lsl#16
- eor $s2,$t2,$s2,lsl#24
- eor $t3,$i3,$t3,lsl#8
-
+ ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
and $i2,lr,$s3,lsr#8 @ i1
and $i3,lr,$s3,lsr#16 @ i2
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s3>>0]
ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8]
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s3>>16]
- ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
+ ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
+ ldr $i1,[$key,#0]
- ldr lr,[sp],#4 @ pop lr
- ldr $t1,[$key,#0]
- ldr $t2,[$key,#4]
- ldr $t3,[$key,#8]
- ldr $i1,[$key,#12]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ eor $s0,$s0,$i1
+ eor $s1,$s1,$t1
+ eor $s2,$s2,$t2
+ eor $s3,$s3,$t3
+ ldr pc,[sp],#4 @ pop and return
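@ (Editorial aside, illustrative only.) The added line above pops the
@ saved return address straight into pc, folding the register restore
@ and the return into one instruction; the matching prologue/epilogue
@ pair is simply
@
@	str	lr,[sp,#-4]!		@ on entry: push lr
@	ldr	pc,[sp],#4		@ on exit:  pop it directly into pc
@
@ which is safe here because these internal routines are entered only
@ from ARM-mode code.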
.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
.global AES_set_encrypt_key
ldrb $t2,[$rounds,#1]
ldrb $t3,[$rounds,#0]
orr $s0,$s0,$t1,lsl#8
- orr $s0,$s0,$t2,lsl#16
- orr $s0,$s0,$t3,lsl#24
ldrb $t2,[$rounds,#5]
ldrb $t3,[$rounds,#4]
orr $s1,$s1,$t1,lsl#8
- orr $s1,$s1,$t2,lsl#16
- orr $s1,$s1,$t3,lsl#24
ldrb $t2,[$rounds,#9]
ldrb $t3,[$rounds,#8]
orr $s2,$s2,$t1,lsl#8
- orr $s2,$s2,$t2,lsl#16
- orr $s2,$s2,$t3,lsl#24
ldrb $t2,[$rounds,#13]
ldrb $t3,[$rounds,#12]
orr $s3,$s3,$t1,lsl#8
- orr $s3,$s3,$t2,lsl#16
- orr $s3,$s3,$t3,lsl#24
str $s2,[$key,#-8]
str $s3,[$key,#-4]
.L128_loop:
and $t2,lr,$s3,lsr#24
and $i1,lr,$s3,lsr#16
- and $i2,lr,$s3,lsr#8
- and $i3,lr,$s3
- ldrb $i3,[$tbl,$i3]
- ldr $t1,[$t3],#4 @ rcon[i++]
+ ldr $t1,[$t3],#4 @ rcon[i++]
orr $t2,$t2,$i3,lsl#8
eor $t2,$t2,$t1
eor $s0,$s0,$t2 @ rk[4]=rk[0]^...
eor $s1,$s1,$s0 @ rk[5]=rk[1]^rk[4]
- eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5]
- eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6]
+ eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5]
+ eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6]
bne .L128_loop
sub r2,$key,#176
b .Ldone
ldrb $t2,[$rounds,#17]
ldrb $t3,[$rounds,#16]
orr $i2,$i2,$t1,lsl#8
- orr $i2,$i2,$t2,lsl#16
- orr $i2,$i2,$t3,lsl#24
ldrb $t2,[$rounds,#21]
ldrb $t3,[$rounds,#20]
orr $i3,$i3,$t1,lsl#8
orr $i3,$i3,$t2,lsl#16
str $i3,[$key,#-4]
teq lr,#192
.L192_loop:
and $t2,lr,$i3,lsr#24
and $i1,lr,$i3,lsr#16
- and $i2,lr,$i3,lsr#8
- and $i3,lr,$i3
- ldrb $i3,[$tbl,$i3]
- ldr $t1,[$t3],#4 @ rcon[i++]
+ ldr $t1,[$t3],#4 @ rcon[i++]
orr $t2,$t2,$i3,lsl#8
eor $i3,$t2,$t1
eor $s0,$s0,$i3 @ rk[6]=rk[0]^...
eor $s1,$s1,$s0 @ rk[7]=rk[1]^rk[6]
- eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7]
- eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8]
+ eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7]
+ eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8]
subeq r2,$key,#216
beq .Ldone
ldrb $t2,[$rounds,#25]
ldrb $t3,[$rounds,#24]
orr $i2,$i2,$t1,lsl#8
- orr $i2,$i2,$t2,lsl#16
- orr $i2,$i2,$t3,lsl#24
ldrb $t2,[$rounds,#29]
ldrb $t3,[$rounds,#28]
orr $i3,$i3,$t1,lsl#8
orr $i3,$i3,$t2,lsl#16
str $i3,[$key,#-4]
mov $rounds,#14
.L256_loop:
and $t2,lr,$i3,lsr#24
and $i1,lr,$i3,lsr#16
- and $i2,lr,$i3,lsr#8
- and $i3,lr,$i3
- ldrb $i3,[$tbl,$i3]
- ldr $t1,[$t3],#4 @ rcon[i++]
+ ldr $t1,[$t3],#4 @ rcon[i++]
orr $t2,$t2,$i3,lsl#8
eor $i3,$t2,$t1
eor $s0,$s0,$i3 @ rk[8]=rk[0]^...
eor $s1,$s1,$s0 @ rk[9]=rk[1]^rk[8]
- eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9]
- eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10]
+ eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9]
+ eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10]
subeq r2,$key,#256
beq .Ldone
and $t2,lr,$s3
and $i1,lr,$s3,lsr#8
- and $i2,lr,$s3,lsr#16
- and $i3,lr,$s3,lsr#24
ldr $i1,[$key,#-44]
ldr $i2,[$key,#-40]
eor $t1,$t1,$t2 @ rk[12]=rk[4]^...
eor $i1,$i1,$t1 @ rk[13]=rk[5]^rk[12]
- eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13]
- eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14]
+ eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13]
+ eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14]
str $i2,[$key,#-8]
str $i3,[$key,#-4]
b .L256_loop
ldrb $t2,[$rounds,#1]
ldrb $t3,[$rounds,#0]
orr $s0,$s0,$t1,lsl#8
- orr $s0,$s0,$t2,lsl#16
- orr $s0,$s0,$t3,lsl#24
ldrb $t2,[$rounds,#5]
ldrb $t3,[$rounds,#4]
orr $s1,$s1,$t1,lsl#8
- orr $s1,$s1,$t2,lsl#16
- orr $s1,$s1,$t3,lsl#24
ldrb $t2,[$rounds,#9]
ldrb $t3,[$rounds,#8]
orr $s2,$s2,$t1,lsl#8
- orr $s2,$s2,$t2,lsl#16
- orr $s2,$s2,$t3,lsl#24
ldrb $t2,[$rounds,#13]
ldrb $t3,[$rounds,#12]
orr $s3,$s3,$t1,lsl#8
mov $t3,$s0,lsr#8
strb $t1,[$rounds,#0]
strb $t2,[$rounds,#1]
- strb $t3,[$rounds,#2]
- strb $s0,[$rounds,#3]
mov $t3,$s1,lsr#8
strb $t1,[$rounds,#4]
strb $t2,[$rounds,#5]
- strb $t3,[$rounds,#6]
- strb $s1,[$rounds,#7]
mov $t3,$s2,lsr#8
strb $t1,[$rounds,#8]
strb $t2,[$rounds,#9]
- strb $t3,[$rounds,#10]
- strb $s2,[$rounds,#11]
mov $t3,$s3,lsr#8
strb $t1,[$rounds,#12]
strb $t2,[$rounds,#13]
.align 2
_armv4_AES_decrypt:
str lr,[sp,#-4]! @ push lr
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- ldr $rounds,[$key,#240-16]
+ ldr $rounds,[$key,#240-16]
eor $s1,$s1,$t2
eor $s2,$s2,$t3
eor $s3,$s3,$i1
sub $rounds,$rounds,#1
mov lr,#255
and $i1,lr,$s0,lsr#16
and $i2,lr,$s0,lsr#8
and $i3,lr,$s0
mov $s0,$s0,lsr#24
ldr $t1,[$tbl,$i1,lsl#2] @ Td1[s0>>16]
- ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
- ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8]
- ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0]
-
+ ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8]
+ ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0]
+ ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
ldr $i1,[$tbl,$i1,lsl#2] @ Td3[s1>>0]
- ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
ldr $i2,[$tbl,$i2,lsl#2] @ Td1[s1>>16]
ldr $i3,[$tbl,$i3,lsl#2] @ Td2[s1>>8]
eor $s0,$s0,$i1,ror#24
- eor $s1,$s1,$t1,ror#8
- eor $t2,$i2,$t2,ror#8
- eor $t3,$i3,$t3,ror#8
-
+ ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
and $i1,lr,$s2,lsr#8 @ i0
ldr $i1,[$tbl,$i1,lsl#2] @ Td2[s2>>8]
ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0]
- ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
ldr $i3,[$tbl,$i3,lsl#2] @ Td1[s2>>16]
eor $s0,$s0,$i1,ror#16
- eor $s1,$s1,$i2,ror#24
- eor $s2,$s2,$t2,ror#8
- eor $t3,$i3,$t3,ror#8
-
+ ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
and $i1,lr,$s3,lsr#16 @ i0
and $i2,lr,$s3,lsr#8 @ i1
ldr $i1,[$tbl,$i1,lsl#2] @ Td1[s3>>16]
ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8]
ldr $i3,[$tbl,$i3,lsl#2] @ Td3[s3>>0]
- ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
+ ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
eor $s1,$s1,$i2,ror#16
eor $s2,$s2,$i3,ror#24
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ ldr $t1,[$key,#-12]
+ ldr $t2,[$key,#-8]
+ eor $s0,$s0,$i1
+ ldr $t3,[$key,#-4]
+ and $i1,lr,$s0,lsr#16
+ eor $s1,$s1,$t1
+ and $i2,lr,$s0,lsr#8
+ eor $s2,$s2,$t2
+ and $i3,lr,$s0
+ eor $s3,$s3,$t3
+ mov $s0,$s0,lsr#24
subs $rounds,$rounds,#1
bne .Ldec_loop
add $tbl,$tbl,#1024
- ldr $t1,[$tbl,#0] @ prefetch Td4
- ldr $t2,[$tbl,#32]
- ldr $t3,[$tbl,#64]
- ldr $i1,[$tbl,#96]
- ldr $i2,[$tbl,#128]
- ldr $i3,[$tbl,#160]
- ldr $t1,[$tbl,#192]
- ldr $t2,[$tbl,#224]
+ ldr $t2,[$tbl,#0] @ prefetch Td4
+ ldr $t3,[$tbl,#32]
+ ldr $t1,[$tbl,#64]
+ ldr $t2,[$tbl,#96]
+ ldr $t3,[$tbl,#128]
+ ldr $t1,[$tbl,#160]
+ ldr $t2,[$tbl,#192]
+ ldr $t3,[$tbl,#224]
- and $i1,lr,$s0,lsr#16
- and $i2,lr,$s0,lsr#8
- and $i3,lr,$s0
- ldrb $s0,[$tbl,$s0,lsr#24] @ Td4[s0>>24]
+ ldrb $s0,[$tbl,$s0] @ Td4[s0>>24]
ldrb $t1,[$tbl,$i1] @ Td4[s0>>16]
- ldrb $t2,[$tbl,$i2] @ Td4[s0>>8]
- ldrb $t3,[$tbl,$i3] @ Td4[s0>>0]
-
+ ldrb $t2,[$tbl,$i2] @ Td4[s0>>8]
+ ldrb $t3,[$tbl,$i3] @ Td4[s0>>0]
ldrb $i1,[$tbl,$i1] @ Td4[s1>>0]
ldrb $s1,[$tbl,$s1,lsr#24] @ Td4[s1>>24]
ldrb $i2,[$tbl,$i2] @ Td4[s1>>16]
- ldrb $i3,[$tbl,$i3] @ Td4[s1>>8]
+ ldrb $i3,[$tbl,$i3] @ Td4[s1>>8]
- eor $t2,$t2,$i2,lsl#8
- eor $t3,$t3,$i3,lsl#8
-
and $i1,lr,$s2,lsr#8 @ i0
ldrb $i1,[$tbl,$i1] @ Td4[s2>>8]
+ and $i3,lr,$s2,lsr#16
+
ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
ldrb $s2,[$tbl,$s2,lsr#24] @ Td4[s2>>24]
- ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
+ ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
- eor $s2,$t2,$s2,lsl#16
- eor $t3,$t3,$i3,lsl#16
-
and $i1,lr,$s3,lsr#16 @ i0
and $i2,lr,$s3,lsr#8 @ i1
ldrb $i1,[$tbl,$i1] @ Td4[s3>>16]
ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
ldrb $i3,[$tbl,$i3] @ Td4[s3>>0]
ldrb $s3,[$tbl,$s3,lsr#24] @ Td4[s3>>24]
eor $s0,$s0,$i1,lsl#16
- ldr lr,[sp],#4 @ pop lr
- ldr $t1,[$key,#0]
- ldr $t2,[$key,#4]
- ldr $t3,[$key,#8]
- ldr $i1,[$key,#12]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ eor $s0,$s0,$i1
+ eor $s1,$s1,$t1
+ eor $s2,$s2,$t2
+ eor $s3,$s3,$t3
+ ldr pc,[sp],#4 @ pop and return
.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
.asciz "AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
# loop, this assembler loop body was found to be ~3x smaller than the
# compiler-generated one...
#
+# July 2010
+#
+# Rescheduling for dual-issue pipeline resulted in 8.5% improvement on
+# Cortex A8 core and ~25 cycles per processed byte (which was observed
+# to be ~3 times faster than gcc-generated code:-)
+#
# A note about the "528B" variant. In the ARM case it makes less sense
# to implement it, for the following reasons:
#
add $Zhh,$Htbl,$nlo,lsl#4
ldmia $Zhh,{$Zll-$Zhh} @ load Htbl[nlo]
and $nhi,$Zll,#0xf @ rem
ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
eor $Zll,$Tll,$Zll,lsr#4
ldrh $Tll,[sp,$nhi] @ rem_4bit[rem]
eor $Zll,$Zll,$Zlh,lsl#28
eor $Zhl,$Zhl,$Zhh,lsl#28
eor $Zhh,$Thh,$Zhh,lsr#4
eor $nlo,$nlo,$nhi
- eor $Zhh,$Zhh,$Tll,lsl#16
and $nhi,$nlo,#0xf0
and $nlo,$nlo,#0x0f
+ eor $Zhh,$Zhh,$Tll,lsl#16
.Loop:
add $Thh,$Htbl,$nlo,lsl#4
subs $cnt,$cnt,#1
- ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
+ ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
add $nlo,$nlo,$nlo
eor $Zll,$Tll,$Zll,lsr#4
ldrh $Tll,[sp,$nlo] @ rem_4bit[rem]
add $Thh,$Htbl,$nhi
eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
- ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
+ ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
add $nhi,$nhi,$nhi
eor $Zll,$Tll,$Zll,lsr#4
ldrh $Tll,[sp,$nhi] @ rem_4bit[rem]
eor $Zll,$Zll,$Zlh,lsl#28
eor $Zlh,$Zlh,$Zhl,lsl#28
eor $Zhl,$Thl,$Zhl,lsr#4
eor $Zhl,$Zhl,$Zhh,lsl#28
- eor $Zhh,$Thh,$Zhh,lsr#4
- eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
+ eor $Zhh,$Thh,$Zhh,lsr#4
andpl $nhi,$nlo,#0xf0
andpl $nlo,$nlo,#0x0f
+ eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
bpl .Loop
ldr $len,[sp,#32] @ re-load $len/end
add $Thh,$Htbl,$nhi
and $nhi,$Zll,#0xf @ rem
ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
eor $Zll,$Tll,$Zll,lsr#4
ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem]
eor $Zll,$Zll,$Zlh,lsl#28
.Loop2:
add $Thh,$Htbl,$nlo,lsl#4
subs $cnt,$cnt,#1
- ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
+ ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
add $nlo,$nlo,$nlo
eor $Zll,$Tll,$Zll,lsr#4
ldrh $Tll,[$rem_4bit,$nlo] @ rem_4bit[rem]
add $Thh,$Htbl,$nhi
eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
- ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
+ ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
add $nhi,$nhi,$nhi
eor $Zll,$Tll,$Zll,lsr#4
ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem]
eor $Zhl,$Zhl,$Zhh,lsl#28
eor $Zhh,$Thh,$Zhh,lsr#4
andpl $nhi,$nlo,#0xf0
- eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
+ eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
bpl .Loop2
___
&Zsmash();
# Performance is ~2x better than gcc 3.4 generated code and in
# "absolute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
+# byte [on single-issue Xscale PXA250 core].
+
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 22% improvement on
+# Cortex A8 core and ~20 cycles per processed byte.
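#
# (Editorial sketch, not part of the module.) The per-round code below
# leans on the same merged-rotate trick as the AES module: with e held
# in an arbitrary register r8 and r3 as scratch, SHA-256's Sigma1(e)
# collapses to
#
#	mov	r3,r8,ror#6
#	eor	r3,r3,r8,ror#11
#	eor	r3,r3,r8,ror#25		@ Sigma1(e) in three instructions
#
# and the rescheduled code interleaves such sequences with the loads.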
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
___
$code.=<<___;
ldr $t2,[$Ktbl],#4 @ *K256++
- str $T1,[sp,#`$i%16`*4]
mov $t0,$e,ror#$Sigma1[0]
+ str $T1,[sp,#`$i%16`*4]
eor $t0,$t0,$e,ror#$Sigma1[1]
- eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e)
- add $T1,$T1,$t0
+ eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e)
eor $t1,$t1,$g @ Ch(e,f,g)
eor $h,$h,$a,ror#$Sigma0[1]
eor $h,$h,$a,ror#$Sigma0[2] @ Sigma0(a)
orr $t0,$a,$b
+ and $t0,$t0,$c
+ add $h,$h,$T1
orr $t0,$t0,$t1 @ Maj(a,b,c)
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___;
- ldr $t1,[sp,#`($i+1)%16`*4] @ $i
+ ldr $t1,[sp,#`($i+1)%16`*4] @ $i
ldr $t2,[sp,#`($i+14)%16`*4]
ldr $T1,[sp,#`($i+0)%16`*4]
- ldr $inp,[sp,#`($i+9)%16`*4]
mov $t0,$t1,ror#$sigma0[0]
+ ldr $inp,[sp,#`($i+9)%16`*4]
eor $t0,$t0,$t1,ror#$sigma0[1]
eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1])
mov $t1,$t2,ror#$sigma1[0]
eor $t1,$t1,$t2,ror#$sigma1[1]
eor $t1,$t1,$t2,lsr#$sigma1[2] @ sigma1(X[i+14])
# SHA512 block procedure for ARMv4. September 2007.
# This code is ~4.5 (four and a half) times faster than code generated
-# by gcc 3.4 and it spends ~72 clock cycles per byte.
+# by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+# Xscale PXA250 core].
+#
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 6% improvement on
+# Cortex A8 core and ~40 cycles per processed byte.
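#
# (Editorial sketch, not part of the module.) ARMv4 has no 64-bit
# registers, so every 64-bit quantity below lives in a lo/hi register
# pair and 64-bit additions become adds/adc pairs; with arbitrary
# register names:
#
#	adds	r0,r0,r2		@ low words, sets carry
#	adc	r1,r1,r3		@ high words plus carry
#
# which is the "T += ..." pattern visible in the round code further down.
#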
# Byte order [in]dependence. =========================================
#
eor $t0,$t0,$Elo,lsl#23
eor $t1,$t1,$Ehi,lsl#23 @ Sigma1(e)
adds $Tlo,$Tlo,$t0
- adc $Thi,$Thi,$t1 @ T += Sigma1(e)
- adds $Tlo,$Tlo,$t2
- adc $Thi,$Thi,$t3 @ T += h
-
ldr $t0,[sp,#$Foff+0] @ f.lo
+ adc $Thi,$Thi,$t1 @ T += Sigma1(e)
ldr $t1,[sp,#$Foff+4] @ f.hi
ldr $t2,[sp,#$Goff+0] @ g.lo
+ adc $Thi,$Thi,$t3 @ T += h
ldr $t3,[sp,#$Goff+4] @ g.hi
- str $Elo,[sp,#$Eoff+0]
- str $Ehi,[sp,#$Eoff+4]
- str $Alo,[sp,#$Aoff+0]
- str $Ahi,[sp,#$Aoff+4]
- eor $t1,$t1,$t3 @ Ch(e,f,g)
-
ldr $t2,[$Ktbl,#4] @ K[i].lo
+ eor $t1,$t1,$t3 @ Ch(e,f,g)
ldr $t3,[$Ktbl,#0] @ K[i].hi
- ldr $Elo,[sp,#$Doff+0] @ d.lo
- ldr $Ehi,[sp,#$Doff+4] @ d.hi
+ ldr $Elo,[sp,#$Doff+0] @ d.lo
adc $Thi,$Thi,$t1 @ T += Ch(e,f,g)
+ ldr $Ehi,[sp,#$Doff+4] @ d.hi
adds $Tlo,$Tlo,$t2
adc $Thi,$Thi,$t3 @ T += K[i]
adds $Elo,$Elo,$Tlo