X-Git-Url: https://git.openssl.org/?p=openssl.git;a=blobdiff_plain;f=crypto%2Fbn%2Fasm%2Fsparcv9-mont.pl;h=2697965b3f28050f64f1f5b713038a0d5896bd35;hp=771cd96141c3c36b4518dcd0dda964dc41a3d101;hb=120a9e1a825bd0407639bedb1e8e15823cf7a545;hpb=6aa36e8e5a062e31543e7796f0351ff9628832ce

diff --git a/crypto/bn/asm/sparcv9-mont.pl b/crypto/bn/asm/sparcv9-mont.pl
index 771cd96141..2697965b3f 100644
--- a/crypto/bn/asm/sparcv9-mont.pl
+++ b/crypto/bn/asm/sparcv9-mont.pl
@@ -20,7 +20,7 @@
 # for undertaken effort are multiple. First of all, UltraSPARC is not
 # the whole SPARCv9 universe and other VIS-free implementations deserve
 # optimized code as much. Secondly, newly introduced UltraSPARC T1,
-# a.k.a. Niagara, has shared FPU and concurrent FPU-intensive pathes,
+# a.k.a. Niagara, has shared FPU and concurrent FPU-intensive paths,
 # such as sparcv9a-mont, will simply sink it. Yes, T1 is equipped with
 # several integrated RSA/DSA accelerator circuits accessible through
 # kernel driver [only(*)], but having decent user-land software
@@ -300,7 +300,7 @@ ___
 ######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
 ######## code without following dedicated squaring procedure.
 ########
-$sbit="%i2"; # re-use $bp!
+$sbit="%o5";
 
 $code.=<<___;
 .align 32
@@ -413,7 +413,7 @@ $code.=<<___;
 mulx $apj,$mul0,$acc0
 mulx $npj,$mul1,$acc1
 add $acc0,$car0,$car0
- add $tpj,$car1,$car1
+ add $tpj,$sbit,$sbit
 ld [$ap+$j],$apj ! ap[j]
 and $car0,$mask,$acc0
 ld [$np+$j],$npj ! np[j]
@@ -422,7 +422,7 @@ $code.=<<___;
 ld [$tp+8],$tpj ! tp[j]
 add $acc0,$acc0,$acc0
 add $j,4,$j ! j++
- or $sbit,$acc0,$acc0
+ add $sbit,$acc0,$acc0
 srlx $acc0,32,$sbit
 and $acc0,$mask,$acc0
 cmp $j,$num
@@ -436,12 +436,12 @@ $code.=<<___;
 mulx $apj,$mul0,$acc0
 mulx $npj,$mul1,$acc1
 add $acc0,$car0,$car0
- add $tpj,$car1,$car1
+ add $tpj,$sbit,$sbit
 and $car0,$mask,$acc0
 srlx $car0,32,$car0
 add $acc1,$car1,$car1
 add $acc0,$acc0,$acc0
- or $sbit,$acc0,$acc0
+ add $sbit,$acc0,$acc0
 srlx $acc0,32,$sbit
 and $acc0,$mask,$acc0
 add $acc0,$car1,$car1
@@ -449,7 +449,7 @@
 srlx $car1,32,$car1
 
 add $car0,$car0,$car0
- or $sbit,$car0,$car0
+ add $sbit,$car0,$car0
 add $car0,$car1,$car1
 add $car2,$car1,$car1
 st $car1,[$tp+4]
@@ -509,7 +509,7 @@ $code.=<<___;
 .Lsqr_inner2:
 mulx $apj,$mul0,$acc0
 mulx $npj,$mul1,$acc1
- add $tpj,$car1,$car1
+ add $tpj,$sbit,$sbit
 add $acc0,$car0,$car0
 ld [$ap+$j],$apj ! ap[j]
 and $car0,$mask,$acc0
@@ -517,7 +517,7 @@ $code.=<<___;
 srlx $car0,32,$car0
 add $acc0,$acc0,$acc0
 ld [$tp+8],$tpj ! tp[j]
- or $sbit,$acc0,$acc0
+ add $sbit,$acc0,$acc0
 add $j,4,$j ! j++
 srlx $acc0,32,$sbit
 and $acc0,$mask,$acc0
@@ -532,12 +532,12 @@ $code.=<<___;
 .Lsqr_no_inner2:
 mulx $apj,$mul0,$acc0
 mulx $npj,$mul1,$acc1
- add $tpj,$car1,$car1
+ add $tpj,$sbit,$sbit
 add $acc0,$car0,$car0
 and $car0,$mask,$acc0
 srlx $car0,32,$car0
 add $acc0,$acc0,$acc0
- or $sbit,$acc0,$acc0
+ add $sbit,$acc0,$acc0
 srlx $acc0,32,$sbit
 and $acc0,$mask,$acc0
 add $acc0,$car1,$car1
@@ -546,7 +546,7 @@
 srlx $car1,32,$car1
 
 add $car0,$car0,$car0
- or $sbit,$car0,$car0
+ add $sbit,$car0,$car0
 add $car0,$car1,$car1
 add $car2,$car1,$car1
 st $car1,[$tp+4]
@@ -591,14 +591,17 @@ $code.=<<___;
 !.Lsqr_last
 
 mulx $npj,$mul1,$acc1
- add $tpj,$car1,$car1
+ add $tpj,$acc0,$acc0
+ srlx $acc0,32,$tmp0
+ and $acc0,$mask,$acc0
+ add $tmp0,$sbit,$sbit
 add $acc0,$car1,$car1
 add $acc1,$car1,$car1
 st $car1,[$tp]
 srlx $car1,32,$car1
 
 add $car0,$car0,$car0 ! recover $car0
- or $sbit,$car0,$car0
+ add $sbit,$car0,$car0
 add $car0,$car1,$car1
 add $car2,$car1,$car1
 st $car1,[$tp+4]
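
In the squaring-path hunks above, tp[j] is now folded into $sbit instead of $car1, and the bit shifted out of the doubled cross-product is merged with add rather than or; once tp[j] lands in $sbit it is no longer a single bit, so it must be propagated as a real carry, and the .Lsqr_last hunk performs the matching split for the final word ($tpj into $acc0, its carry through $tmp0 into $sbit). Below is a minimal C sketch of the limb arithmetic one patched inner-loop iteration performs, assuming 32-bit limbs kept in 64-bit variables; the helper name and signature are hypothetical, and the variable names only echo the register aliases from the Perl source.

#include <stdint.h>

/*
 * Illustrative sketch only, not OpenSSL code: the 32-bit limb arithmetic of
 * one patched squaring inner-loop iteration.  Variable names mirror the
 * register aliases in sparcv9-mont.pl; the function itself is hypothetical.
 */
void sqr_inner_step(uint64_t *car0, uint64_t *car1, uint64_t *sbit,
                    uint32_t apj, uint32_t npj, uint32_t tpj,
                    uint32_t mul0, uint32_t mul1, uint32_t *tp_word)
{
    const uint64_t mask = 0xffffffff;
    uint64_t acc0;

    *car0 += (uint64_t)apj * mul0;   /* mulx $apj,$mul0; add $acc0,$car0,$car0 */
    *sbit += tpj;                    /* add $tpj,$sbit,$sbit (was: into $car1) */
    acc0   = *car0 & mask;           /* and $car0,$mask,$acc0                  */
    *car0 >>= 32;                    /* srlx $car0,32,$car0                    */
    *car1 += (uint64_t)npj * mul1;   /* mulx $npj,$mul1; add $acc1,$car1,$car1 */
    acc0  += acc0;                   /* add $acc0,$acc0,$acc0 (double)         */
    acc0  += *sbit;                  /* add $sbit,$acc0,$acc0 (was: or)        */
    *sbit  = acc0 >> 32;             /* srlx $acc0,32,$sbit                    */
    acc0  &= mask;                   /* and $acc0,$mask,$acc0                  */
    *car1 += acc0;
    *tp_word = (uint32_t)*car1;      /* st $car1,[$tp]                         */
    *car1 >>= 32;                    /* srlx $car1,32,$car1                    */
}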