# forms are granted according to the OpenSSL license.
# ====================================================================
+# October 2005
+#
# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
# Because unlike integer multiplier, which simply stalls whole CPU,
# FPU is fully pipelined and can effectively emit 48 bit partial
# product every cycle. Why not blended SPARC v9? One can argue that
# making this module dependent on UltraSPARC VIS extension limits its
-# binary compatibility. Very well may be, but the simple fact is that
-# there is no known SPARC v9 implementation, which does not implement
-# VIS. Even brand new Fujitsu's SPARC64 V is equipped with VIS unit.
+# binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
+# implementations from the compatibility matrix. But the rest, i.e. the
+# whole Sun UltraSPARC family and Fujitsu's brand new SPARC64 V, all
+# support the VIS extension instructions used in this module. This is
+# considered good enough to recommend that HAL SPARC64 users [if any]
+# simply fall back to the no-asm configuration.
# USI&II cores currently exhibit a uniform 2x improvement [over the
# pre-bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
# performance improves by a few percent for shorter keys and worsens by a few
-# percents for longer keys. This's because USIII integer multiplier
+# percent for longer keys. This is because the USIII integer multiplier
# is >3x faster than USI&II one, which is harder to match [but see
# TODO list below]. It should also be noted that SPARC64 V features
# out-of-order execution, which *might* mean that integer multiplier
-# is pipelined, which in turn *might* be impossible to match...
-#
+# is pipelined, which in turn *might* be impossible to match... On an
+# additional note, SPARC64 V implements the FP Multiply-Add instruction,
+# which is perfectly usable in this context... In other words, as far
+# as the HAL/Fujitsu SPARC64 family goes, talk to the author:-)
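+#
+# For the record, a rough C sketch of the per-digit scheme the FPU code
+# below implements [all variable names here are illustrative and do not
+# appear in the code; uint64_t as in <stdint.h>]. a[j] and n[j] are
+# consumed as 32-bit halves, while b[i] and the per-iteration factor
+# m=(ap[0]*bp[i]+tp[0])*n0 are split into four 16-bit digits. Every
+# fmuld therefore forms an at most 48-bit product, and the short faddd
+# sums stay well below 2^53, i.e. remain exact in double precision:
+#
+#	double a_lo = (double)(a_j & 0xffffffff);	/* fxtod	*/
+#	double a_hi = (double)(a_j >> 32);
+#	double n_lo = (double)(n_j & 0xffffffff);
+#	double b16[4], m16[4];
+#	for (int k = 0; k < 4; k++) {			/* ldda %asi	*/
+#		b16[k] = (double)((b_i >> (16*k)) & 0xffff);
+#		m16[k] = (double)((m   >> (16*k)) & 0xffff);
+#	}
+#	/* one of the 16-bit-weighted columns, e.g. weight 0:		*/
+#	double col0 = a_lo*b16[0] + n_lo*m16[0];	/* fmuld+faddd	*/
+#
+# The column sums are converted back to integers [fdtox] and glued
+# together at 16-bit offsets in .L1st/.Linner, carry included, roughly:
+#
+#	uint64_t v0, v1, v2, v3;	/* four fdtox results		*/
+#	uint64_t r, carry;		/* carry is the "34-bit" one	*/
+#	v1 += v0 >> 16;
+#	v2 += v1 >> 16;
+#	v3 += v2 >> 16;
+#	r  = (v0 & 0xffff) | ((v1 & 0xffff) << 16)
+#	   | ((v2 & 0xffff) << 32) | (v3 << 48);
+#	r += carry;				/* carry from previous j */
+#	carry = (v3 >> 16) + (r < carry);	/* srlx %o3,16 [+1]	*/
+#	/* r then becomes tp[j-1]					*/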
+
+# The implementation implies the following "non-natural" limitations on
+# input arguments:
+# - num may not be less than 4;
+# - num has to be even;
+# - ap, bp, rp, np have to be 64-bit aligned [which is not a problem
+# as long as BIGNUM.d are malloc-ated];
+# Failure to meet any of these conditions has no fatal effects, it
+# simply doesn't give any performance gain [see the guard sketch below].
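+#
+# A minimal C sketch of the guard at the top of the routine [again, the
+# names are illustrative]; on any violation it returns 0, presumably
+# letting the caller fall back to the generic code path:
+#
+#	if (num < 4 || (num & 1))
+#		return 0;	/* too short or odd number of words	*/
+#	if (((uintptr_t)ap | (uintptr_t)bp |
+#	     (uintptr_t)rp | (uintptr_t)np) & 7)
+#		return 0;	/* some pointer is not 8-byte aligned	*/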
+
# TODO:
-# - complete 32-bit adaptation (requires universal changes to
-# BN_MONT_CTX and bn_mul_mont prototype, but nothing really
-# unmanagable:-);
# - modulo-schedule inner loop for better performance (on in-order
# execution core such as UltraSPARC this shall result in further
# noticeable(!) improvement);
# - dedicated squaring procedure[?];
-$fname="bn_mul_mont";
+$fname="bn_mul_mont_fpu";
$bits=32;
-for (@ARGV) {
- $bits=64 if (/\-m64/ || /\-xarch\=v9/);
- $vis=1 if (/\-mcpu=ultra/ || /\-xarch\=v[9|8plus]\S/);
-}
-
-if (!$vis || $bits==32) { # 32-bit is not supported just yet...
-print<<___;
-.section ".text",#alloc,#execinstr
-.global $fname
-$fname:
- retl
- xor %o0,%o0,%o0 ! just signal "not implemented"
-.type $fname,#function
-.size $fname,(.-$fname)
-___
-exit;
-}
+for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64) {
$bias=2047;
$ap="%i1"; # const BN_ULONG *ap,
$bp="%i2"; # const BN_ULONG *bp,
$np="%i3"; # const BN_ULONG *np,
-$n0="%i4"; # BN_ULONG n0,
+$n0="%i4"; # const BN_ULONG *n0,
$num="%i5"; # int num);
-$tp="%l0";
+$tp="%l0"; # t[num]
$ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
$ap_h="%l2"; # to these four vectors as double-precision FP values.
$np_l="%l3"; # This way a bunch of fxtods are eliminated in second
$j="%l6";
$mask="%l7"; # 16-bit mask, 0xffff
-$n0="%g4"; # reassigned!!!
-$carry="%i4"; # reassigned!!! [only 1 bit is used]
+$n0="%g4"; # reassigned(!) to "64-bit" register
+$carry="%i4"; # %i4 reused(!) for a carry bit
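+
+# n0 is passed by reference now [note the prototype comment above]; the
+# preamble below rebuilds the 64-bit value from the two 32-bit words it
+# points at, in the spirit of this sketch [n0_words is an illustrative
+# name only]:
+#
+#	uint64_t n0_64 = ((uint64_t)n0_words[1] << 32) | n0_words[0];
+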
# FP register naming chart
#
.global $fname
.align 32
$fname:
- save %sp,-$frame,%sp
+ save %sp,-$frame-$locals,%sp
sethi %hi(0xffff),$mask
- sll $num,3,$num ! num*=8
or $mask,%lo(0xffff),$mask
- mov %i4,$n0 ! reassigned, remember?
+
+ cmp $num,4
+ bl,a,pn %icc,.Lret
+ clr %i0
+ andcc $num,1,%g0 ! $num has to be even...
+ bnz,a,pn %icc,.Lret
+ clr %i0 ! signal "unsupported input value"
+ or $bp,$ap,%l0
+ srl $num,1,$num
+ or $rp,$np,%l1
+ or %l0,%l1,%l0
+	andcc	%l0,7,%g0	! ...and pointers have to be 8-byte aligned
+ bnz,a,pn %icc,.Lret
+ clr %i0 ! signal "unsupported input value"
+ ld [%i4+0],$n0 ! $n0 reassigned, remember?
+ ld [%i4+4],%o0
+ sllx %o0,32,%o0
+ or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
+
+ sll $num,3,$num ! num*=8
add %sp,$bias,%o0 ! real top of stack
sll $num,2,%o1
add %o1,$num,%o1 ! %o1=num*5
sub %o0,%o1,%o0
- sub %o0,$locals,%o0
and %o0,-2048,%o0 ! optimize TLB utilization
- sub %o0,$bias,%sp ! alloca
+ sub %o0,$bias,%sp ! alloca(5*num*8)
- rd %asi,%o7
+ rd %asi,%o7 ! save %asi
add %sp,$bias+$frame+$locals,$tp
add $tp,$num,$ap_l
- add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vector ends !
+ add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
add $ap_l,$num,$ap_h
add $ap_h,$num,$np_l
add $np_l,$num,$np_h
add $bp,$num,$bp
add $np,$num,$np
- stx %o7,[%sp+$bias+$frame+48]
+ stx %o7,[%sp+$bias+$frame+48] ! save %asi
\f
- sub %g0,$num,$i
- sub %g0,$num,$j
+ sub %g0,$num,$i ! i=-num
+ sub %g0,$num,$j ! j=-num
add $ap,$j,%o3
add $bp,$i,%o4
+
ldx [$bp+$i],%o0 ! bp[0]
- add $np,$j,%o5
- add %sp,$bias+$frame+0,%o7
ldx [$ap+$j],%o1 ! ap[0]
+ sllx %o0,32,%g1
+ sllx %o1,32,%g5
+ srlx %o0,32,%o0
+ srlx %o1,32,%o1
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
+ add $np,$j,%o5
mulx %o1,%o0,%o0 ! ap[0]*bp[0]
mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
- stx %o0,[%o7]
+ stx %o0,[%sp+$bias+$frame+0]
- ld [%o3+4],$alo_ ! load a[j] as pair of 32-bit words
- fxors $alo,$alo,$alo
- ld [%o3+0],$ahi_
- fxors $ahi,$ahi,$ahi
- ld [%o5+4],$nlo_ ! load n[j] as pair of 32-bit words
- fxors $nlo,$nlo,$nlo
- ld [%o5+0],$nhi_
- fxors $nhi,$nhi,$nhi
+ ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o3+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
! transfer b[i] to FPU as 4x16-bit values
- ldda [%o4+6]%asi,$ba
+ ldda [%o4+2]%asi,$ba
fxtod $alo,$alo
- ldda [%o4+4]%asi,$bb
+ ldda [%o4+0]%asi,$bb
fxtod $ahi,$ahi
- ldda [%o4+2]%asi,$bc
+ ldda [%o4+6]%asi,$bc
fxtod $nlo,$nlo
- ldda [%o4+0]%asi,$bd
+ ldda [%o4+4]%asi,$bd
fxtod $nhi,$nhi
! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
- ldda [%o7+6]%asi,$na
+ ldda [%sp+$bias+$frame+6]%asi,$na
fxtod $ba,$ba
- ldda [%o7+4]%asi,$nb
+ ldda [%sp+$bias+$frame+4]%asi,$nb
fxtod $bb,$bb
- ldda [%o7+2]%asi,$nc
+ ldda [%sp+$bias+$frame+2]%asi,$nc
fxtod $bc,$bc
- ldda [%o7+0]%asi,$nd
+ ldda [%sp+$bias+$frame+0]%asi,$nd
fxtod $bd,$bd
std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
std $nhi,[$np_h+$j]
fxtod $nd,$nd
- fmuld $alo,$ba,$aloa
- fmuld $nlo,$na,$nloa
- fmuld $alo,$bb,$alob
- fmuld $nlo,$nb,$nlob
- fmuld $alo,$bc,$aloc
- fmuld $nlo,$nc,$nloc
- faddd $aloa,$nloa,$nloa
- fmuld $alo,$bd,$alod
- fmuld $nlo,$nd,$nlod
- faddd $alob,$nlob,$nlob
- fmuld $ahi,$ba,$ahia
- fmuld $nhi,$na,$nhia
- faddd $aloc,$nloc,$nloc
- fmuld $ahi,$bb,$ahib
- fmuld $nhi,$nb,$nhib
- faddd $alod,$nlod,$nlod
- fmuld $ahi,$bc,$ahic
- fmuld $nhi,$nc,$nhic
- faddd $ahia,$nhia,$nhia
- fmuld $ahi,$bd,$ahid
- fmuld $nhi,$nd,$nhid
-
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
faddd $ahic,$nhic,$dota ! $nhic
faddd $ahid,$nhid,$dotb ! $nhid
!or %o7,%o0,%o0 ! 64-bit result
srlx %o3,16,%g1 ! 34-bit carry
\f
- ba .L1st
add $j,8,$j
-.align 32
+ add $ap,$j,%o4
+ add $np,$j,%o5
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$ba,$aloa
+ std $ahi,[$ap_h+$j]
+ fmuld $nlo,$na,$nloa
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bb,$alob
+ std $nhi,[$np_h+$j]
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $dota,$nloa,$nloa
+ faddd $dotb,$nlob,$nlob
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ std $nlod,[%sp+$bias+$frame+24]
+\f
+ addcc $j,8,$j
+ bz,pn %icc,.L1stskip
+.align 32,0x1000000
.L1st:
- add $ap,$j,%o3
- add $np,$j,%o4
- ld [%o3+4],$alo_ ! load a[j] as pair of 32-bit words
- fxors $alo,$alo,$alo
- ld [%o3+0],$ahi_
- fxors $ahi,$ahi,$ahi
- ld [%o4+4],$nlo_ ! load n[j] as pair of 32-bit words
- fxors $nlo,$nlo,$nlo
- ld [%o4+0],$nhi_
- fxors $nhi,$nhi,$nhi
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ add %o7,%o1,%o1
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ or %o7,%o0,%o0 ! 64-bit result
+ addcc %g1,%o0,%o0
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
+
+
+ add $ap,$j,%o4
+ add $np,$j,%o5
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
fxtod $alo,$alo
fxtod $ahi,$ahi
fxtod $nhi,$nhi
std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
- fmuld $alo,$ba,$aloa
+ fmuld $alo,$ba,$aloa
std $ahi,[$ap_h+$j]
- fmuld $nlo,$na,$nloa
+ fmuld $nlo,$na,$nloa
std $nlo,[$np_l+$j] ! save smashed np[j] in double format
- fmuld $alo,$bb,$alob
+ fmuld $alo,$bb,$alob
std $nhi,[$np_h+$j]
- fmuld $nlo,$nb,$nlob
- fmuld $alo,$bc,$aloc
- fmuld $nlo,$nc,$nloc
- faddd $aloa,$nloa,$nloa
- fmuld $alo,$bd,$alod
- fmuld $nlo,$nd,$nlod
- faddd $alob,$nlob,$nlob
- fmuld $ahi,$ba,$ahia
- fmuld $nhi,$na,$nhia
- faddd $aloc,$nloc,$nloc
- fmuld $ahi,$bb,$ahib
- fmuld $nhi,$nb,$nhib
- faddd $alod,$nlod,$nlod
- fmuld $ahi,$bc,$ahic
- fmuld $nhi,$nc,$nhic
- faddd $ahia,$nhia,$nhia
- fmuld $ahi,$bd,$ahid
- fmuld $nhi,$nd,$nhid
- faddd $ahib,$nhib,$nhib
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
faddd $dota,$nloa,$nloa
faddd $dotb,$nlob,$nlob
std $nlob,[%sp+$bias+$frame+8]
std $nloc,[%sp+$bias+$frame+16]
std $nlod,[%sp+$bias+$frame+24]
+
+ addcc $j,8,$j
+ bnz,pt %icc,.L1st
+ add $tp,8,$tp
+\f
+.L1stskip:
ldx [%sp+$bias+$frame+0],%o0
ldx [%sp+$bias+$frame+8],%o1
ldx [%sp+$bias+$frame+16],%o2
add %g1,1,%g1
stx %o0,[$tp] ! tp[j-1]=
- add $j,8,$j
- brnz $j,.L1st
add $tp,8,$tp
\f
fdtox $dota,$dota
add $i,8,$i
.align 32
.Louter:
- sub %g0,$num,$j
+ sub %g0,$num,$j ! j=-num
add %sp,$bias+$frame+$locals,$tp
add $bp,$i,%o4
+
ldx [$bp+$i],%o0 ! bp[i]
- add %sp,$bias+$frame+0,%o7
ldx [$ap+$j],%o1 ! ap[0]
+ sllx %o0,32,%g1
+ sllx %o1,32,%g5
+ srlx %o0,32,%o0
+ srlx %o1,32,%o1
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
ldx [$tp],%o2 ! tp[0]
mulx %o1,%o0,%o0
addcc %o2,%o0,%o0
mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
- stx %o0,[%o7]
-
+ stx %o0,[%sp+$bias+$frame+0]
! transfer b[i] to FPU as 4x16-bit values
- ldda [%o4+6]%asi,$ba
- ldda [%o4+4]%asi,$bb
- ldda [%o4+2]%asi,$bc
- ldda [%o4+0]%asi,$bd
+ ldda [%o4+2]%asi,$ba
+ ldda [%o4+0]%asi,$bb
+ ldda [%o4+6]%asi,$bc
+ ldda [%o4+4]%asi,$bd
! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
- ldda [%o7+6]%asi,$na
+ ldda [%sp+$bias+$frame+6]%asi,$na
fxtod $ba,$ba
- ldda [%o7+4]%asi,$nb
+ ldda [%sp+$bias+$frame+4]%asi,$nb
fxtod $bb,$bb
- ldda [%o7+2]%asi,$nc
+ ldda [%sp+$bias+$frame+2]%asi,$nc
fxtod $bc,$bc
- ldda [%o7+0]%asi,$nd
+ ldda [%sp+$bias+$frame+0]%asi,$nd
fxtod $bd,$bd
ldd [$ap_l+$j],$alo ! load a[j] in double format
fxtod $na,$na
ldd [$np_h+$j],$nhi
fxtod $nd,$nd
- fmuld $alo,$ba,$aloa
- fmuld $nlo,$na,$nloa
- fmuld $alo,$bb,$alob
- fmuld $nlo,$nb,$nlob
- fmuld $alo,$bc,$aloc
- fmuld $nlo,$nc,$nloc
- faddd $aloa,$nloa,$nloa
- fmuld $alo,$bd,$alod
- fmuld $nlo,$nd,$nlod
- faddd $alob,$nlob,$nlob
- fmuld $ahi,$ba,$ahia
- fmuld $nhi,$na,$nhia
- faddd $aloc,$nloc,$nloc
- fmuld $ahi,$bb,$ahib
- fmuld $nhi,$nb,$nhib
- faddd $alod,$nlod,$nlod
- fmuld $ahi,$bc,$ahic
- fmuld $nhi,$nc,$nhic
- faddd $ahia,$nhia,$nhia
- fmuld $ahi,$bd,$ahid
- fmuld $nhi,$nd,$nhid
-
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
faddd $ahic,$nhic,$dota ! $nhic
faddd $ahid,$nhid,$dotb ! $nhid
bcs,a %xcc,.+8
add %g1,1,%g1
\f
- ba .Linner
add $j,8,$j
-.align 32
-.Linner:
ldd [$ap_l+$j],$alo ! load a[j] in double format
ldd [$ap_h+$j],$ahi
ldd [$np_l+$j],$nlo ! load n[j] in double format
ldd [$np_h+$j],$nhi
- fmuld $alo,$ba,$aloa
- fmuld $nlo,$na,$nloa
- fmuld $alo,$bb,$alob
- fmuld $nlo,$nb,$nlob
- fmuld $alo,$bc,$aloc
- fmuld $nlo,$nc,$nloc
- faddd $aloa,$nloa,$nloa
- fmuld $alo,$bd,$alod
- fmuld $nlo,$nd,$nlod
- faddd $alob,$nlob,$nlob
- fmuld $ahi,$ba,$ahia
- fmuld $nhi,$na,$nhia
- faddd $aloc,$nloc,$nloc
- fmuld $ahi,$bb,$ahib
- fmuld $nhi,$nb,$nhib
- faddd $alod,$nlod,$nlod
- fmuld $ahi,$bc,$ahic
- fmuld $nhi,$nc,$nhic
- faddd $ahia,$nhia,$nhia
- fmuld $ahi,$bd,$ahid
- fmuld $nhi,$nd,$nhid
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $dota,$nloa,$nloa
+ faddd $dotb,$nlob,$nlob
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ std $nlod,[%sp+$bias+$frame+24]
+\f
+ addcc $j,8,$j
+ bz,pn %icc,.Linnerskip
+.align 32,0x1000000
+.Linner:
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ add %o7,%o1,%o1
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ or %o7,%o0,%o0 ! 64-bit result
+ addcc %g1,%o0,%o0
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ ldx [$tp+8],%o7 ! tp[j]
+ addcc %o7,%o0,%o0
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]
+
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
faddd $dota,$nloa,$nloa
faddd $dotb,$nlob,$nlob
faddd $ahic,$nhic,$dota ! $nhic
std $nlob,[%sp+$bias+$frame+8]
std $nloc,[%sp+$bias+$frame+16]
std $nlod,[%sp+$bias+$frame+24]
+
+ addcc $j,8,$j
+ bnz,pt %icc,.Linner
+ add $tp,8,$tp
+\f
+.Linnerskip:
ldx [%sp+$bias+$frame+0],%o0
ldx [%sp+$bias+$frame+8],%o1
ldx [%sp+$bias+$frame+16],%o2
add %g1,1,%g1
stx %o0,[$tp] ! tp[j-1]
- add $j,8,$j
- brnz $j,.Linner
add $tp,8,$tp
\f
fdtox $dota,$dota
bcs,a %xcc,.+8
add $carry,1,$carry
- add $i,8,$i
- brnz $i,.Louter
+ addcc $i,8,$i
+ bnz %icc,.Louter
nop
\f
- sub %g0,$num,$j ! j=-num
- add $tp,8,$tp ! adjust tp to point at the end
-
+ sub %g0,$num,%o7 ! n=-num
cmp $carry,0 ! clears %icc.c
bne,pn %icc,.Lsub
- nop
+ add $tp,8,$tp ! adjust tp to point at the end
ld [$tp-8],%o0
- ld [$np-8],%o1
- cmp %o0,%o1
+ ld [$np-4],%o1
+ cmp %o0,%o1 ! compare topmost words
bcs,pt %icc,.Lcopy ! %icc.c is clean if not taken
nop
.align 32,0x1000000
.Lsub:
- ldd [$tp+$j],%o0
- ldd [$np+$j],%o2
- subccc %o1,%o3,%o1
- subccc %o0,%o2,%o0
- std %o0,[$rp+$j]
- add $j,8,$j
- brnz $j,.Lsub
+ ldd [$tp+%o7],%o0
+ ldd [$np+%o7],%o2
+ subccc %o1,%o2,%o2
+ subccc %o0,%o3,%o3
+ std %o2,[$rp+%o7]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lsub
nop
subccc $carry,0,$carry
- bcc %icc,.Lzap
- sub %g0,$num,$j
+ bcc,pt %icc,.Lzap
+ sub %g0,$num,%o7 ! n=-num
.align 16,0x1000000
.Lcopy:
- ldx [$tp+$j],%o0
- stx %o0,[$rp+$j]
- add $j,8,$j
- brnz $j,.Lcopy
+ ldx [$tp+%o7],%o0
+ srlx %o0,32,%o1
+ std %o0,[$rp+%o7]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lcopy
nop
ba .Lzap
- sub %g0,$num,$j
+ sub %g0,$num,%o7 ! n=-num
.align 32
.Lzap:
- stx %g0,[$tp+$j]
- stx %g0,[$ap_l+$j]
- stx %g0,[$ap_h+$j]
- stx %g0,[$np_l+$j]
- stx %g0,[$np_h+$j]
- add $j,8,$j
- brnz $j,.Lzap
+ stx %g0,[$tp+%o7]
+ stx %g0,[$ap_l+%o7]
+ stx %g0,[$ap_h+%o7]
+ stx %g0,[$np_l+%o7]
+ stx %g0,[$np_h+%o7]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lzap
nop
ldx [%sp+$bias+$frame+48],%o7
wr %g0,%o7,%asi ! restore %asi
mov 1,%i0
+.Lret:
ret
restore
.type $fname,#function
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+# The substitution below makes it possible to compile without demanding
+# VIS extensions on the command line, e.g. -xarch=v9 vs. -xarch=v9a. I
+# dare to do this, because VIS capability is detected at run-time now
+# and this routine is not called on a CPU not capable of executing it.
+# Do note that fzeros is not the only VIS dependency! Another dependency
+# is implicit: it is just _a_ numerical value loaded into the %asi
+# register, which the assembler can't recognize as VIS-specific...
+$code =~ s/fzeros\s+%f([0-9]+)/
+ sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)
+ /gem;
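+# For instance, the substitution above turns "fzeros %f1" into
+# ".word 0x83b00c20", i.e. 0x81b00c20|(1<<25): the destination register
+# number simply lands in the rd field [bits 29-25] of the instruction
+# word.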
+
print $code;
+# flush
close STDOUT;