# implementations from compatibility matrix. But the rest, whole Sun
# UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
# VIS extension instructions used in this module. This is considered
-# good enough to recommend HAL SPARC64 users [if any] to simply fall
-# down to no-asm configuration.
+# good enough to not care about HAL SPARC64 users [if any] who have
+# integer-only pure SPARCv9 module to "fall down" to.
# USI&II cores currently exhibit uniform 2x improvement [over pre-
# bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
# is pipelined, which in turn *might* be impossible to match... On
# an additional note, SPARC64 V implements FP Multiply-Add instruction,
# which is perfectly usable in this context... In other words, as far
-# as HAL/Fujitsu SPARC64 family goes, talk to the author:-)
+# as Fujitsu SPARC64 V goes, talk to the author:-)
# The implementation implies the following "non-natural" limitations on
# input arguments:
# - num may not be less than 4;
# - num has to be even;
-# - ap, bp, rp, np has to be 64-bit aligned [which is not a problem
-# as long as BIGNUM.d are malloc-ated];
# Failure to meet either condition has no fatal effects, simply
# doesn't give any performance gain.
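+#
+# When either condition fails, the routine simply declines the job by
+# returning 0 [the "clr %i0" below], leaving it to the caller to fall
+# back to a generic code path. A minimal dispatch sketch, assuming this
+# routine is exported as bn_mul_mont_fpu and an integer-only
+# bn_mul_mont_int is available as fallback [names illustrative]:
+#
+#	int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+#			const BN_ULONG *np, const BN_ULONG *n0, int num)
+#	{
+#		if (num >= 4 && !(num & 1))	/* meets both constraints */
+#			return bn_mul_mont_fpu(rp, ap, bp, np, n0, num);
+#		return bn_mul_mont_int(rp, ap, bp, np, n0, num);
+#	}
+#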
$ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
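+# [0xD2 is the UltraSPARC ASI_FL16_PRIMARY identifier: an ldda through
+# this %asi fetches a 16-bit halfword straight into a floating-point
+# register, which is how the 16-bit digits of the operands reach the FPU]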
$code=<<___;
-.ident "UltraSPARC Montgomery multiply by <appro\@fy.chalmers.se>"
.section ".text",#alloc,#execinstr
.global $fname
.align 32
$fname:
save %sp,-$frame-$locals,%sp
- sethi %hi(0xffff),$mask
- or $mask,%lo(0xffff),$mask
cmp $num,4
bl,a,pn %icc,.Lret
clr %i0 ! signal "unsupported input value"
andcc $num,1,%g0 ! $num has to be even...
bnz,a,pn %icc,.Lret
clr %i0 ! signal "unsupported input value"
- or $bp,$ap,%l0
+
srl $num,1,$num
- or $rp,$np,%l1
- or %l0,%l1,%l0
- andcc %l0,7,%g0 ! ...and pointers has to be 8-byte aligned
- bnz,a,pn %icc,.Lret
- clr %i0 ! signal "unsupported input value"
+ sethi %hi(0xffff),$mask
ld [%i4+0],$n0 ! $n0 reassigned, remember?
+ or $mask,%lo(0xffff),$mask
ld [%i4+4],%o0
sllx %o0,32,%o0
or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
add $ap,$j,%o3
add $bp,$i,%o4
- ldx [$bp+$i],%o0 ! bp[0]
- ldx [$ap+$j],%o1 ! ap[0]
- sllx %o0,32,%g1
- sllx %o1,32,%g5
- srlx %o0,32,%o0
- srlx %o1,32,%o1
+ ld [%o3+4],%g1 ! bp[0]
+ ld [%o3+0],%o0
+ ld [%o4+4],%g5 ! ap[0]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
or %g1,%o0,%o0
or %g5,%o1,%o1
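+ ! operands are fetched as 32-bit halves and reassembled above, so the
+ ! 8-byte alignment requirement [and its run-time check] could be dropped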
bz,pn %icc,.L1stskip
std $nlod,[%sp+$bias+$frame+24]
\f
-.align 32,0x1000000
+.align 32 ! incidentally already aligned !
.L1st:
add $ap,$j,%o4
add $np,$j,%o5
add $tp,8,$tp
\f
.L1stskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
ldx [%sp+$bias+$frame+0],%o0
ldx [%sp+$bias+$frame+8],%o1
ldx [%sp+$bias+$frame+16],%o2
ldx [%sp+$bias+$frame+24],%o3
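+ ! %o0-%o3 are fdtox-ed partial products belonging to four consecutive
+ ! 16-bit columns; ripple each column's excess into the next, then
+ ! splice the low 16 bits of each into a single 64-bit limb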
srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
srlx %o1,16,%o7
add %o7,%o2,%o2
srlx %o2,16,%o7
or %o1,%o0,%o0
or %o2,%o0,%o0
or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+32],%o4
addcc %g1,%o0,%o0
+ ldx [%sp+$bias+$frame+40],%o5
srlx %o3,16,%g1 ! 34-bit carry
bcs,a %xcc,.+8
add %g1,1,%g1
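+ ! annulled-branch idiom: the add in the delay slot runs only if the
+ ! addcc above carried, folding that carry bit into %g1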
stx %o0,[$tp] ! tp[j-1]=
add $tp,8,$tp
-\f
- fdtox $dota,$dota
- fdtox $dotb,$dotb
- std $dota,[%sp+$bias+$frame+32]
- std $dotb,[%sp+$bias+$frame+40]
- ldx [%sp+$bias+$frame+32],%o0
- ldx [%sp+$bias+$frame+40],%o1
- srlx %o0,16,%o7
- add %o7,%o1,%o1
- and %o0,$mask,%o0
- sllx %o1,16,%o7
- or %o7,%o0,%o0
- addcc %g1,%o0,%o0
- srlx %o1,48,%g1
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
bcs,a %xcc,.+8
add %g1,1,%g1
mov %g1,$carry
- stx %o0,[$tp] ! tp[num-1]=
+ stx %o4,[$tp] ! tp[num-1]=
\f
ba .Louter
add $i,8,$i
sub %g0,$num,$j ! j=-num
add %sp,$bias+$frame+$locals,$tp
+ add $ap,$j,%o3
add $bp,$i,%o4
- ldx [$bp+$i],%o0 ! bp[i]
- ldx [$ap+$j],%o1 ! ap[0]
- sllx %o0,32,%g1
- sllx %o1,32,%g5
- srlx %o0,32,%o0
- srlx %o1,32,%o1
+ ld [%o3+4],%g1 ! bp[i]
+ ld [%o3+0],%o0
+ ld [%o4+4],%g5 ! ap[0]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
or %g1,%o0,%o0
or %g5,%o1,%o1
bz,pn %icc,.Linnerskip
std $nlod,[%sp+$bias+$frame+24]
\f
-.align 32,0x1000000
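+ ! branch over the alignment pad instead of falling through it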
+ ba .Linner
+ nop
+.align 32
.Linner:
ldd [$ap_l+$j],$alo ! load a[j] in double format
ldd [$ap_h+$j],$ahi
or %o7,%o0,%o0 ! 64-bit result
faddd $nloc,$nhia,$nloc
addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
faddd $nlod,$nhib,$nlod
srlx %o3,16,%g1 ! 34-bit carry
fdtox $nloa,$nloa
bcs,a %xcc,.+8
add %g1,1,%g1
- ldx [$tp+8],%o7 ! tp[j]
fdtox $nlob,$nlob
addcc %o7,%o0,%o0
fdtox $nloc,$nloc
bnz %icc,.Louter
nop
\f
- sub %g0,$num,%o7 ! n=-num
- cmp $carry,0 ! clears %icc.c
- bne,pn %icc,.Lsub
add $tp,8,$tp ! adjust tp to point at the end
+ orn %g0,%g0,%g4
+ sub %g0,$num,%o7 ! n=-num
+ ba .Lsub
+ subcc %g0,%g0,%g0 ! clear %icc.c
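+ ! the subtraction now runs unconditionally; whether it is kept is
+ ! decided later in .Lcopy by the %g4 mask instead of a branch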
- ld [$tp-8],%o0
- ld [$np-4],%o1
- cmp %o0,%o1 ! compare topmost words
- bcs,pt %icc,.Lcopy ! %icc.c is clean if not taken
- nop
-
-.align 32,0x1000000
+.align 32
.Lsub:
- ldd [$tp+%o7],%o0
- ldd [$np+%o7],%o2
- subccc %o1,%o2,%o2
- subccc %o0,%o3,%o3
- std %o2,[$rp+%o7]
+ ldx [$tp+%o7],%o0
+ add $np,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ srlx %o0,32,%o1
+ subccc %o0,%o2,%o2
+ add $rp,%o7,%g1
+ subccc %o1,%o3,%o3
+ st %o2,[%g1+0]
add %o7,8,%o7
brnz,pt %o7,.Lsub
- nop
- subccc $carry,0,$carry
- bcc,pt %icc,.Lzap
+ st %o3,[%g1+4]
+ subc $carry,0,%g4
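+ ! %g4 = all ones if the borrow ran past $carry [i.e. tp < np, keep tp],
+ ! all zeros otherwise [keep the difference already stored in rp]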
sub %g0,$num,%o7 ! n=-num
+ ba .Lcopy
+ nop
-.align 16,0x1000000
+.align 32
.Lcopy:
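+ ! select tp[] [%g4 all ones] or the difference in rp[] [%g4 zero],
+ ! and wipe tp[] as it is read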
ldx [$tp+%o7],%o0
+ add $rp,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ stx %g0,[$tp+%o7]
+ and %o0,%g4,%o0
srlx %o0,32,%o1
- std %o0,[$rp+%o7]
+ andn %o2,%g4,%o2
+ andn %o3,%g4,%o3
+ or %o2,%o0,%o0
+ or %o3,%o1,%o1
+ st %o0,[%g1+0]
add %o7,8,%o7
brnz,pt %o7,.Lcopy
- nop
- ba .Lzap
+ st %o1,[%g1+4]
sub %g0,$num,%o7 ! n=-num
-.align 32
.Lzap:
- stx %g0,[$tp+%o7]
stx %g0,[$ap_l+%o7]
stx %g0,[$ap_h+%o7]
stx %g0,[$np_l+%o7]
restore
.type $fname,#function
.size $fname,(.-$fname)
+.asciz "Montgomery Multiplication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
+.align 32
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;