X-Git-Url: https://git.openssl.org/gitweb/?a=blobdiff_plain;f=crypto%2Fbn%2Fasm%2Fsparcv9a-mont.pl;h=a14205f2f006f111557cf9366ebdffe814f39846;hb=acdf0814234c8a20bcbdab7f1cf0b4d452adb415;hp=038081100f4a34eba63958842f51fd0007f96b8f;hpb=3b4a0225e248775bf7eb7919be87df40209dece3;p=openssl.git diff --git a/crypto/bn/asm/sparcv9a-mont.pl b/crypto/bn/asm/sparcv9a-mont.pl index 038081100f..a14205f2f0 100755 --- a/crypto/bn/asm/sparcv9a-mont.pl +++ b/crypto/bn/asm/sparcv9a-mont.pl @@ -2,8 +2,9 @@ # ==================================================================== # Written by Andy Polyakov for the OpenSSL -# project. Rights for redistribution and usage in source and binary -# forms are granted according to the OpenSSL license. +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. # ==================================================================== # October 2005 @@ -17,8 +18,8 @@ # implementations from compatibility matrix. But the rest, whole Sun # UltraSPARC family and brand new Fujitsu's SPARC64 V, all support # VIS extension instructions used in this module. This is considered -# good enough to recommend HAL SPARC64 users [if any] to simply fall -# down to no-asm configuration. +# good enough to not care about HAL SPARC64 users [if any] who have +# integer-only pure SPARCv9 module to "fall down" to. # USI&II cores currently exhibit uniform 2x improvement [over pre- # bn_mul_mont codebase] for all key lengths and benchmarks. On USIII @@ -30,14 +31,12 @@ # is pipelined, which in turn *might* be impossible to match... On # additional note, SPARC64 V implements FP Multiply-Add instruction, # which is perfectly usable in this context... In other words, as far -# as HAL/Fujitsu SPARC64 family goes, talk to the author:-) +# as Fujitsu SPARC64 V goes, talk to the author:-) # The implementation implies following "non-natural" limitations on # input arguments: # - num may not be less than 4; # - num has to be even; -# - ap, bp, rp, np has to be 64-bit aligned [which is not a problem -# as long as BIGNUM.d are malloc-ated]; # Failure to meet either condition has no fatal effects, simply # doesn't give any performance gain. @@ -47,6 +46,15 @@ # noticeable(!) improvement); # - dedicated squaring procedure[?]; +###################################################################### +# November 2006 +# +# Modulo-scheduled inner loops allow to interleave floating point and +# integer instructions and minimize Read-After-Write penalties. This +# results in *further* 20-50% perfromance improvement [depending on +# key length, more for longer keys] on USI&II cores and 30-80% - on +# USIII&IV. + $fname="bn_mul_mont_fpu"; $bits=32; for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); } @@ -113,15 +121,12 @@ $nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62"; $ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load $code=<<___; -.ident "UltraSPARC Montgomery multiply by " .section ".text",#alloc,#execinstr .global $fname .align 32 $fname: save %sp,-$frame-$locals,%sp - sethi %hi(0xffff),$mask - or $mask,%lo(0xffff),$mask cmp $num,4 bl,a,pn %icc,.Lret @@ -129,14 +134,11 @@ $fname: andcc $num,1,%g0 ! $num has to be even... bnz,a,pn %icc,.Lret clr %i0 ! signal "unsupported input value" - or $bp,$ap,%l0 + srl $num,1,$num - or $rp,$np,%l1 - or %l0,%l1,%l0 - andcc %l0,7,%g0 ! ...and pointers has to be 8-byte aligned - bnz,a,pn %icc,.Lret - clr %i0 ! 
signal "unsupported input value" + sethi %hi(0xffff),$mask ld [%i4+0],$n0 ! $n0 reassigned, remember? + or $mask,%lo(0xffff),$mask ld [%i4+4],%o0 sllx %o0,32,%o0 or %o0,$n0,$n0 ! $n0=n0[1].n0[0] @@ -173,12 +175,12 @@ $fname: add $ap,$j,%o3 add $bp,$i,%o4 - ldx [$bp+$i],%o0 ! bp[0] - ldx [$ap+$j],%o1 ! ap[0] - sllx %o0,32,%g1 - sllx %o1,32,%g5 - srlx %o0,32,%o0 - srlx %o1,32,%o1 + ld [%o3+4],%g1 ! bp[0] + ld [%o3+0],%o0 + ld [%o4+4],%g5 ! ap[0] + sllx %g1,32,%g1 + ld [%o4+0],%o1 + sllx %g5,32,%g5 or %g1,%o0,%o0 or %g5,%o1,%o1 @@ -261,20 +263,55 @@ $fname: fdtox $nlod,$nlod std $nloa,[%sp+$bias+$frame+0] + add $j,8,$j std $nlob,[%sp+$bias+$frame+8] + add $ap,$j,%o4 std $nloc,[%sp+$bias+$frame+16] + add $np,$j,%o5 std $nlod,[%sp+$bias+$frame+24] + + ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words + fzeros $alo + ld [%o4+4],$ahi_ + fzeros $ahi + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words + fzeros $nlo + ld [%o5+4],$nhi_ + fzeros $nhi + + fxtod $alo,$alo + fxtod $ahi,$ahi + fxtod $nlo,$nlo + fxtod $nhi,$nhi + ldx [%sp+$bias+$frame+0],%o0 + fmuld $alo,$ba,$aloa ldx [%sp+$bias+$frame+8],%o1 + fmuld $nlo,$na,$nloa ldx [%sp+$bias+$frame+16],%o2 + fmuld $alo,$bb,$alob ldx [%sp+$bias+$frame+24],%o3 + fmuld $nlo,$nb,$nlob srlx %o0,16,%o7 + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + fmuld $alo,$bc,$aloc add %o7,%o1,%o1 + std $ahi,[$ap_h+$j] + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc srlx %o1,16,%o7 + std $nlo,[$np_l+$j] ! save smashed np[j] in double format + fmuld $alo,$bd,$alod add %o7,%o2,%o2 + std $nhi,[$np_h+$j] + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod srlx %o2,16,%o7 + fmuld $ahi,$ba,$ahia add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia !and %o0,$mask,%o0 !and %o1,$mask,%o1 !and %o2,$mask,%o2 @@ -285,20 +322,48 @@ $fname: !or %o2,%o0,%o0 !or %o7,%o0,%o0 ! 64-bit result srlx %o3,16,%g1 ! 34-bit carry + fmuld $ahi,$bb,$ahib + + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + fmuld $ahi,$bc,$ahic + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + fmuld $ahi,$bd,$ahid + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + + faddd $dota,$nloa,$nloa + faddd $dotb,$nlob,$nlob + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + addcc $j,8,$j + std $nloc,[%sp+$bias+$frame+16] + bz,pn %icc,.L1stskip + std $nlod,[%sp+$bias+$frame+24] - ba .L1st - add $j,8,$j -.align 32 +.align 32 ! incidentally already aligned ! .L1st: - add $ap,$j,%o3 - add $np,$j,%o4 - ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words + add $ap,$j,%o4 + add $np,$j,%o5 + ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words fzeros $alo - ld [%o3+4],$ahi_ + ld [%o4+4],$ahi_ fzeros $ahi - ld [%o4+0],$nlo_ ! load n[j] as pair of 32-bit words + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words fzeros $nlo - ld [%o4+4],$nhi_ + ld [%o5+4],$nhi_ fzeros $nhi fxtod $alo,$alo @@ -306,35 +371,61 @@ $fname: fxtod $nlo,$nlo fxtod $nhi,$nhi - std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + ldx [%sp+$bias+$frame+0],%o0 fmuld $alo,$ba,$aloa - std $ahi,[$ap_h+$j] + ldx [%sp+$bias+$frame+8],%o1 fmuld $nlo,$na,$nloa - std $nlo,[$np_l+$j] ! 
save smashed np[j] in double format + ldx [%sp+$bias+$frame+16],%o2 fmuld $alo,$bb,$alob - std $nhi,[$np_h+$j] + ldx [%sp+$bias+$frame+24],%o3 fmuld $nlo,$nb,$nlob + + srlx %o0,16,%o7 + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format fmuld $alo,$bc,$aloc - faddd $aloa,$nloa,$nloa + add %o7,%o1,%o1 + std $ahi,[$ap_h+$j] + faddd $aloa,$nloa,$nloa fmuld $nlo,$nc,$nloc + srlx %o1,16,%o7 + std $nlo,[$np_l+$j] ! save smashed np[j] in double format fmuld $alo,$bd,$alod - faddd $alob,$nlob,$nlob + add %o7,%o2,%o2 + std $nhi,[$np_h+$j] + faddd $alob,$nlob,$nlob fmuld $nlo,$nd,$nlod + srlx %o2,16,%o7 fmuld $ahi,$ba,$ahia - faddd $aloc,$nloc,$nloc + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + faddd $aloc,$nloc,$nloc fmuld $nhi,$na,$nhia + and %o1,$mask,%o1 + and %o2,$mask,%o2 fmuld $ahi,$bb,$ahib - faddd $alod,$nlod,$nlod + sllx %o1,16,%o1 + faddd $alod,$nlod,$nlod fmuld $nhi,$nb,$nhib + sllx %o2,32,%o2 fmuld $ahi,$bc,$ahic - faddd $ahia,$nhia,$nhia + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahia,$nhia,$nhia fmuld $nhi,$nc,$nhic + or %o2,%o0,%o0 fmuld $ahi,$bd,$ahid - faddd $ahib,$nhib,$nhib + or %o7,%o0,%o0 ! 64-bit result + faddd $ahib,$nhib,$nhib fmuld $nhi,$nd,$nhid + addcc %g1,%o0,%o0 + faddd $dota,$nloa,$nloa + srlx %o3,16,%g1 ! 34-bit carry + faddd $dotb,$nlob,$nlob + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! tp[j-1]= - faddd $dota,$nloa,$nloa - faddd $dotb,$nlob,$nlob faddd $ahic,$nhic,$dota ! $nhic faddd $ahid,$nhid,$dotb ! $nhid @@ -350,13 +441,24 @@ $fname: std $nlob,[%sp+$bias+$frame+8] std $nloc,[%sp+$bias+$frame+16] std $nlod,[%sp+$bias+$frame+24] + + addcc $j,8,$j + bnz,pt %icc,.L1st + add $tp,8,$tp + +.L1stskip: + fdtox $dota,$dota + fdtox $dotb,$dotb + ldx [%sp+$bias+$frame+0],%o0 ldx [%sp+$bias+$frame+8],%o1 ldx [%sp+$bias+$frame+16],%o2 ldx [%sp+$bias+$frame+24],%o3 srlx %o0,16,%o7 + std $dota,[%sp+$bias+$frame+32] add %o7,%o1,%o1 + std $dotb,[%sp+$bias+$frame+40] srlx %o1,16,%o7 add %o7,%o2,%o2 srlx %o2,16,%o7 @@ -370,35 +472,28 @@ $fname: or %o1,%o0,%o0 or %o2,%o0,%o0 or %o7,%o0,%o0 ! 64-bit result + ldx [%sp+$bias+$frame+32],%o4 addcc %g1,%o0,%o0 + ldx [%sp+$bias+$frame+40],%o5 srlx %o3,16,%g1 ! 34-bit carry bcs,a %xcc,.+8 add %g1,1,%g1 stx %o0,[$tp] ! tp[j-1]= - addcc $j,8,$j - bnz,pt %icc,.L1st add $tp,8,$tp - - fdtox $dota,$dota - fdtox $dotb,$dotb - std $dota,[%sp+$bias+$frame+32] - std $dotb,[%sp+$bias+$frame+40] - ldx [%sp+$bias+$frame+32],%o0 - ldx [%sp+$bias+$frame+40],%o1 - srlx %o0,16,%o7 - add %o7,%o1,%o1 - and %o0,$mask,%o0 - sllx %o1,16,%o7 - or %o7,%o0,%o0 - addcc %g1,%o0,%o0 - srlx %o1,48,%g1 + srlx %o4,16,%o7 + add %o7,%o5,%o5 + and %o4,$mask,%o4 + sllx %o5,16,%o7 + or %o7,%o4,%o4 + addcc %g1,%o4,%o4 + srlx %o5,48,%g1 bcs,a %xcc,.+8 add %g1,1,%g1 mov %g1,$carry - stx %o0,[$tp] ! tp[num-1]= + stx %o4,[$tp] ! tp[num-1]= ba .Louter add $i,8,$i @@ -407,14 +502,15 @@ $fname: sub %g0,$num,$j ! j=-num add %sp,$bias+$frame+$locals,$tp + add $ap,$j,%o3 add $bp,$i,%o4 - ldx [$bp+$i],%o0 ! bp[i] - ldx [$ap+$j],%o1 ! ap[0] - sllx %o0,32,%g1 - sllx %o1,32,%g5 - srlx %o0,32,%o0 - srlx %o1,32,%o1 + ld [%o3+4],%g1 ! bp[i] + ld [%o3+0],%o0 + ld [%o4+4],%g5 ! ap[0] + sllx %g1,32,%g1 + ld [%o4+0],%o1 + sllx %g5,32,%g5 or %g1,%o0,%o0 or %g5,%o1,%o1 @@ -485,37 +581,84 @@ $fname: std $nloa,[%sp+$bias+$frame+0] std $nlob,[%sp+$bias+$frame+8] std $nloc,[%sp+$bias+$frame+16] + add $j,8,$j std $nlod,[%sp+$bias+$frame+24] + + ldd [$ap_l+$j],$alo ! load a[j] in double format + ldd [$ap_h+$j],$ahi + ldd [$np_l+$j],$nlo ! 
load n[j] in double format + ldd [$np_h+$j],$nhi + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc ldx [%sp+$bias+$frame+0],%o0 + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc ldx [%sp+$bias+$frame+8],%o1 + fmuld $alo,$bd,$alod ldx [%sp+$bias+$frame+16],%o2 + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod ldx [%sp+$bias+$frame+24],%o3 + fmuld $ahi,$ba,$ahia srlx %o0,16,%o7 + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia add %o7,%o1,%o1 + fmuld $ahi,$bb,$ahib srlx %o1,16,%o7 + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib add %o7,%o2,%o2 + fmuld $ahi,$bc,$ahic srlx %o2,16,%o7 + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] ! why? and %o0,$mask,%o0 + fmuld $ahi,$bd,$ahid and %o1,$mask,%o1 and %o2,$mask,%o2 + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid sllx %o1,16,%o1 + faddd $dota,$nloa,$nloa sllx %o2,32,%o2 + faddd $dotb,$nlob,$nlob sllx %o3,48,%o7 or %o1,%o0,%o0 + faddd $ahic,$nhic,$dota ! $nhic or %o2,%o0,%o0 + faddd $ahid,$nhid,$dotb ! $nhid or %o7,%o0,%o0 ! 64-bit result ldx [$tp],%o7 + faddd $nloc,$nhia,$nloc addcc %o7,%o0,%o0 ! end-of-why? + faddd $nlod,$nhib,$nlod srlx %o3,16,%g1 ! 34-bit carry + fdtox $nloa,$nloa bcs,a %xcc,.+8 add %g1,1,%g1 + + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + addcc $j,8,$j + std $nloc,[%sp+$bias+$frame+16] + bz,pn %icc,.Linnerskip + std $nlod,[%sp+$bias+$frame+24] ba .Linner - add $j,8,$j + nop .align 32 .Linner: ldd [$ap_l+$j],$alo ! load a[j] in double format @@ -528,48 +671,85 @@ $fname: fmuld $alo,$bb,$alob fmuld $nlo,$nb,$nlob fmuld $alo,$bc,$aloc - faddd $aloa,$nloa,$nloa + ldx [%sp+$bias+$frame+0],%o0 + faddd $aloa,$nloa,$nloa fmuld $nlo,$nc,$nloc + ldx [%sp+$bias+$frame+8],%o1 fmuld $alo,$bd,$alod - faddd $alob,$nlob,$nlob + ldx [%sp+$bias+$frame+16],%o2 + faddd $alob,$nlob,$nlob fmuld $nlo,$nd,$nlod + ldx [%sp+$bias+$frame+24],%o3 fmuld $ahi,$ba,$ahia - faddd $aloc,$nloc,$nloc + + srlx %o0,16,%o7 + faddd $aloc,$nloc,$nloc fmuld $nhi,$na,$nhia + add %o7,%o1,%o1 fmuld $ahi,$bb,$ahib - faddd $alod,$nlod,$nlod + srlx %o1,16,%o7 + faddd $alod,$nlod,$nlod fmuld $nhi,$nb,$nhib + add %o7,%o2,%o2 fmuld $ahi,$bc,$ahic - faddd $ahia,$nhia,$nhia + srlx %o2,16,%o7 + faddd $ahia,$nhia,$nhia fmuld $nhi,$nc,$nhic + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 fmuld $ahi,$bd,$ahid - faddd $ahib,$nhib,$nhib + and %o1,$mask,%o1 + and %o2,$mask,%o2 + faddd $ahib,$nhib,$nhib fmuld $nhi,$nd,$nhid + sllx %o1,16,%o1 + faddd $dota,$nloa,$nloa + sllx %o2,32,%o2 + faddd $dotb,$nlob,$nlob + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahic,$nhic,$dota ! $nhic + or %o2,%o0,%o0 + faddd $ahid,$nhid,$dotb ! $nhid + or %o7,%o0,%o0 ! 64-bit result + faddd $nloc,$nhia,$nloc + addcc %g1,%o0,%o0 + ldx [$tp+8],%o7 ! tp[j] + faddd $nlod,$nhib,$nlod + srlx %o3,16,%g1 ! 34-bit carry + fdtox $nloa,$nloa + bcs,a %xcc,.+8 + add %g1,1,%g1 + fdtox $nlob,$nlob + addcc %o7,%o0,%o0 + fdtox $nloc,$nloc + bcs,a %xcc,.+8 + add %g1,1,%g1 - faddd $dota,$nloa,$nloa - faddd $dotb,$nlob,$nlob - faddd $ahic,$nhic,$dota ! $nhic - faddd $ahid,$nhid,$dotb ! $nhid - - faddd $nloc,$nhia,$nloc - faddd $nlod,$nhib,$nlod - - fdtox $nloa,$nloa - fdtox $nlob,$nlob - fdtox $nloc,$nloc - fdtox $nlod,$nlod + stx %o0,[$tp] ! 
tp[j-1] + fdtox $nlod,$nlod std $nloa,[%sp+$bias+$frame+0] std $nlob,[%sp+$bias+$frame+8] std $nloc,[%sp+$bias+$frame+16] + addcc $j,8,$j std $nlod,[%sp+$bias+$frame+24] + bnz,pt %icc,.Linner + add $tp,8,$tp + +.Linnerskip: + fdtox $dota,$dota + fdtox $dotb,$dotb + ldx [%sp+$bias+$frame+0],%o0 ldx [%sp+$bias+$frame+8],%o1 ldx [%sp+$bias+$frame+16],%o2 ldx [%sp+$bias+$frame+24],%o3 srlx %o0,16,%o7 + std $dota,[%sp+$bias+$frame+32] add %o7,%o1,%o1 + std $dotb,[%sp+$bias+$frame+40] srlx %o1,16,%o7 add %o7,%o2,%o2 srlx %o2,16,%o7 @@ -582,41 +762,34 @@ $fname: sllx %o3,48,%o7 or %o1,%o0,%o0 or %o2,%o0,%o0 + ldx [%sp+$bias+$frame+32],%o4 or %o7,%o0,%o0 ! 64-bit result + ldx [%sp+$bias+$frame+40],%o5 addcc %g1,%o0,%o0 + ldx [$tp+8],%o7 ! tp[j] srlx %o3,16,%g1 ! 34-bit carry bcs,a %xcc,.+8 add %g1,1,%g1 - ldx [$tp+8],%o7 ! tp[j] addcc %o7,%o0,%o0 bcs,a %xcc,.+8 add %g1,1,%g1 stx %o0,[$tp] ! tp[j-1] - addcc $j,8,$j - bnz,pt %icc,.Linner add $tp,8,$tp - - fdtox $dota,$dota - fdtox $dotb,$dotb - std $dota,[%sp+$bias+$frame+32] - std $dotb,[%sp+$bias+$frame+40] - ldx [%sp+$bias+$frame+32],%o0 - ldx [%sp+$bias+$frame+40],%o1 - srlx %o0,16,%o7 - add %o7,%o1,%o1 - and %o0,$mask,%o0 - sllx %o1,16,%o7 - or %o7,%o0,%o0 - addcc %g1,%o0,%o0 - srlx %o1,48,%g1 + srlx %o4,16,%o7 + add %o7,%o5,%o5 + and %o4,$mask,%o4 + sllx %o5,16,%o7 + or %o7,%o4,%o4 + addcc %g1,%o4,%o4 + srlx %o5,48,%g1 bcs,a %xcc,.+8 add %g1,1,%g1 - addcc $carry,%o0,%o0 - stx %o0,[$tp] ! tp[num-1] + addcc $carry,%o4,%o4 + stx %o4,[$tp] ! tp[num-1] mov %g1,$carry bcs,a %xcc,.+8 add $carry,1,$carry @@ -625,45 +798,51 @@ $fname: bnz %icc,.Louter nop - sub %g0,$num,%o7 ! n=-num - cmp $carry,0 ! clears %icc.c - bne,pn %icc,.Lsub add $tp,8,$tp ! adjust tp to point at the end + orn %g0,%g0,%g4 + sub %g0,$num,%o7 ! n=-num + ba .Lsub + subcc %g0,%g0,%g0 ! clear %icc.c - ld [$tp-8],%o0 - ld [$np-4],%o1 - cmp %o0,%o1 ! compare topmost words - bcs,pt %icc,.Lcopy ! %icc.c is clean if not taken - nop - -.align 32,0x1000000 +.align 32 .Lsub: - ldd [$tp+%o7],%o0 - ldd [$np+%o7],%o2 - subccc %o1,%o2,%o2 - subccc %o0,%o3,%o3 - std %o2,[$rp+%o7] + ldx [$tp+%o7],%o0 + add $np,%o7,%g1 + ld [%g1+0],%o2 + ld [%g1+4],%o3 + srlx %o0,32,%o1 + subccc %o0,%o2,%o2 + add $rp,%o7,%g1 + subccc %o1,%o3,%o3 + st %o2,[%g1+0] add %o7,8,%o7 brnz,pt %o7,.Lsub - nop - subccc $carry,0,$carry - bcc,pt %icc,.Lzap + st %o3,[%g1+4] + subc $carry,0,%g4 sub %g0,$num,%o7 ! n=-num + ba .Lcopy + nop -.align 16,0x1000000 +.align 32 .Lcopy: ldx [$tp+%o7],%o0 + add $rp,%o7,%g1 + ld [%g1+0],%o2 + ld [%g1+4],%o3 + stx %g0,[$tp+%o7] + and %o0,%g4,%o0 srlx %o0,32,%o1 - std %o0,[$rp+%o7] + andn %o2,%g4,%o2 + andn %o3,%g4,%o3 + or %o2,%o0,%o0 + or %o3,%o1,%o1 + st %o0,[%g1+0] add %o7,8,%o7 brnz,pt %o7,.Lcopy - nop - ba .Lzap + st %o1,[%g1+4] sub %g0,$num,%o7 ! n=-num -.align 32 .Lzap: - stx %g0,[$tp+%o7] stx %g0,[$ap_l+%o7] stx %g0,[$ap_h+%o7] stx %g0,[$np_l+%o7] @@ -681,6 +860,8 @@ $fname: restore .type $fname,#function .size $fname,(.-$fname) +.asciz "Montgomery Multipltication for UltraSPARC, CRYPTOGAMS by " +.align 32 ___ $code =~ s/\`([^\`]*)\`/eval($1)/gem;
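
The integer halves of the loops above reassemble four partial sums, produced by the FP multiplies and brought back through fdtox at 16-bit-shifted positions, into one 64-bit tp[] word plus a running carry (the "34-bit carry" in the comments). Below is a minimal C sketch of that recombination; fold_word, o[] and carry are invented names for the illustration, with o[0..3] standing for %o0..%o3 as reloaded from the stack.

#include <stdint.h>

/* Hypothetical illustration of the %o0..%o3 folding; not part of the module. */
static uint64_t fold_word(const uint64_t o[4], uint64_t *carry)
{
	uint64_t o1 = o[1] + (o[0] >> 16);	/* srlx %o0,16,%o7; add %o7,%o1,%o1 */
	uint64_t o2 = o[2] + (o1   >> 16);
	uint64_t o3 = o[3] + (o2   >> 16);	/* %o3.%o2[0..15].%o1[0..15].%o0[0..15] */
	uint64_t w  =  (o[0] & 0xffff)		/* the and/sllx/or sequence */
		    | ((o1   & 0xffff) << 16)
		    | ((o2   & 0xffff) << 32)
		    |  (o3             << 48);	/* 64-bit result */
	uint64_t prev = *carry;

	*carry = o3 >> 16;			/* srlx %o3,16,%g1, the 34-bit carry */
	w += prev;				/* addcc %g1,%o0,%o0 */
	if (w < prev)				/* bcs,a %xcc,.+8; add %g1,1,%g1 */
		*carry += 1;
	return w;				/* value stored to tp[j-1] */
}

In the modulo-scheduled loops this integer arithmetic is interleaved with the next iteration's fmuld/faddd instructions, which is where the further 20-50% and 30-80% gains quoted in the November 2006 note come from.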
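
When tracing the scheduled code it also helps to keep the underlying word-level algorithm in view: bn_mul_mont computes the Montgomery product rp = ap*bp*R^(-1) mod np, accumulating into the temporary tp[] and finishing with the conditional subtraction performed by the .Lsub/.Lcopy/.Lzap tail. The sketch below is a generic 32-bit reference, not the OpenSSL implementation; mul_mont_ref and thi are invented names, and the real module differs in that it further splits every operand into 16-bit limbs so the FPU can do the multiplies, which is also the only reason for the "num not less than 4 and even" restrictions mentioned above.

#include <stdint.h>
#include <string.h>

/*
 * Generic word-level Montgomery multiplication for illustration only.
 * rp = ap*bp*2^(-32*num) mod np, with n0 = -np[0]^(-1) mod 2^32.
 */
static void mul_mont_ref(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
			 const uint32_t *np, uint32_t n0, int num)
{
	uint32_t tp[num];		/* C99 VLA, fine for a sketch */
	uint64_t t, carry, thi = 0;	/* thi: accumulator word "num" */
	int i, j;

	memset(tp, 0, sizeof(tp));

	for (i = 0; i < num; i++) {
		carry = 0;				/* tp += ap[]*bp[i] */
		for (j = 0; j < num; j++) {
			t = (uint64_t)ap[j]*bp[i] + tp[j] + carry;
			tp[j] = (uint32_t)t;
			carry = t >> 32;
		}
		thi += carry;

		uint32_t m = (uint32_t)(tp[0]*n0);	/* reduction factor */
		t = (uint64_t)np[0]*m + tp[0];		/* low word becomes 0 */
		carry = t >> 32;
		for (j = 1; j < num; j++) {		/* tp = (tp+np[]*m)>>32 */
			t = (uint64_t)np[j]*m + tp[j] + carry;
			tp[j-1] = (uint32_t)t;
			carry = t >> 32;
		}
		t = thi + carry;
		tp[num-1] = (uint32_t)t;
		thi = t >> 32;				/* 0 or 1 */
	}

	carry = 0;					/* conditional subtraction, */
	for (j = 0; j < num; j++) {			/* cf. .Lsub and .Lcopy     */
		t = (uint64_t)tp[j] - np[j] - carry;
		rp[j] = (uint32_t)t;
		carry = t >> 63;			/* borrow */
	}
	if (thi < carry)				/* tp < np: keep tp as is */
		memcpy(rp, tp, sizeof(tp));
}

The rewritten .Lcopy path in the diff performs the same selection branch-free with an and/andn mask (%g4), and .Lzap then wipes the on-stack double-format copies of ap and np.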