# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# October 2005
# FPU is fully pipelined and can effectively emit 48 bit partial
# product every cycle. Why not blended SPARC v9? One can argue that
# making this module dependent on UltraSPARC VIS extension limits its
-# binary compatibility. Very well may be, but the simple fact is that
-# there is no known SPARC v9 implementation, which does not implement
-# VIS. Even brand new Fujitsu's SPARC64 V is equipped with VIS unit.
+# binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
+# implementations from compatibility matrix. But the rest, whole Sun
+# UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
+# VIS extension instructions used in this module. This is considered
+# good enough to not care about HAL SPARC64 users [if any] who have
+# integer-only pure SPARCv9 module to "fall down" to.
# USI&II cores currently exhibit uniform 2x improvement [over pre-
# bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
# is >3x faster than USI&II one, which is harder to match [but see
# TODO list below]. It should also be noted that SPARC64 V features
# out-of-order execution, which *might* mean that integer multiplier
-# is pipelined, which in turn *might* be impossible to match...
+# is pipelined, which in turn *might* be impossible to match... On
+# additional note, SPARC64 V implements FP Multiply-Add instruction,
+# which is perfectly usable in this context... In other words, as far
+# as Fujitsu SPARC64 V goes, talk to the author:-)
-# In 32-bit context the implementation implies following additional
-# limitations on input arguments:
+# The implementation implies following "non-natural" limitations on
+# input arguments:
# - num may not be less than 4;
# - num has to be even;
# - ap, bp, rp, np has to be 64-bit aligned [which is not a problem
# noticeable(!) improvement);
# - dedicated squaring procedure[?];
-$fname="bn_mul_mont";
-$bits=32;
-for (@ARGV) {
- $bits=64 if (/\-m64/ || /\-xarch\=v9/);
- $vis=1 if (/\-mcpu=ultra/ || /\-xarch\=v[9|8plus]\S/);
-}
+######################################################################
+# November 2006
+#
+# Modulo-scheduled inner loops allow to interleave floating point and
+# integer instructions and minimize Read-After-Write penalties. This
+# results in *further* 20-50% performance improvement [depending on
+# key length, more for longer keys] on USI&II cores and 30-80% - on
+# USIII&IV.
-if (!$vis) {
-print<<___;
-.section ".text",#alloc,#execinstr
-.global $fname
-$fname:
- retl
- xor %o0,%o0,%o0 ! just signal "not implemented"
-.type $fname,#function
-.size $fname,(.-$fname)
-___
-exit;
-}
+$fname="bn_mul_mont_fpu";
+$bits=32;
+for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64) {
$bias=2047;
save %sp,-$frame-$locals,%sp
sethi %hi(0xffff),$mask
or $mask,%lo(0xffff),$mask
-___
-$code.=<<___ if ($bits==64);
- ldx [%i4],$n0 ! $n0 reassigned, remember?
-___
-$code.=<<___ if ($bits==32);
+
cmp $num,4
bl,a,pn %icc,.Lret
clr %i0
ld [%i4+4],%o0
sllx %o0,32,%o0
or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
-___
-$code.=<<___;
+
sll $num,3,$num ! num*=8
add %sp,$bias,%o0 ! real top of stack
stx %o7,[%sp+$bias+$frame+48] ! save %asi
\f
- sub %g0,$num,$i
- sub %g0,$num,$j
+ sub %g0,$num,$i ! i=-num
+ sub %g0,$num,$j ! j=-num
add $ap,$j,%o3
add $bp,$i,%o4
-___
-$code.=<<___ if ($bits==64);
+
ldx [$bp+$i],%o0 ! bp[0]
ldx [$ap+$j],%o1 ! ap[0]
-___
-$code.=<<___ if ($bits==32);
- ldd [$bp+$i],%o0 ! bp[0]
- ldd [$ap+$j],%g2 ! ap[0]
- sllx %o1,32,%o1
- sllx %g3,32,%g3
- or %o0,%o1,%o0
- or %g2,%g3,%o1
-___
-$code.=<<___;
+ sllx %o0,32,%g1
+ sllx %o1,32,%g5
+ srlx %o0,32,%o0
+ srlx %o1,32,%o1
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
add $np,$j,%o5
mulx %o1,%o0,%o0 ! ap[0]*bp[0]
mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
stx %o0,[%sp+$bias+$frame+0]
- ld [%o3+`$bits==32 ? 0 : 4`],$alo_ ! load a[j] as pair of 32-bit words
+ ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
fzeros $alo
- ld [%o3+`$bits==32 ? 4 : 0`],$ahi_
+ ld [%o3+4],$ahi_
fzeros $ahi
- ld [%o5+`$bits==32 ? 0 : 4`],$nlo_ ! load n[j] as pair of 32-bit words
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
fzeros $nlo
- ld [%o5+`$bits==32 ? 4 : 0`],$nhi_
+ ld [%o5+4],$nhi_
fzeros $nhi
! transfer b[i] to FPU as 4x16-bit values
- ldda [%o4+`$bits==32 ? 2 : 6`]%asi,$ba
+ ldda [%o4+2]%asi,$ba
fxtod $alo,$alo
- ldda [%o4+`$bits==32 ? 0 : 4`]%asi,$bb
+ ldda [%o4+0]%asi,$bb
fxtod $ahi,$ahi
- ldda [%o4+`$bits==32 ? 6 : 2`]%asi,$bc
+ ldda [%o4+6]%asi,$bc
fxtod $nlo,$nlo
- ldda [%o4+`$bits==32 ? 4 : 0`]%asi,$bd
+ ldda [%o4+4]%asi,$bd
fxtod $nhi,$nhi
! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
fmuld $alo,$bb,$alob
fmuld $nlo,$nb,$nlob
fmuld $alo,$bc,$aloc
- fmuld $nlo,$nc,$nloc
faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
fmuld $alo,$bd,$alod
- fmuld $nlo,$nd,$nlod
faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
fmuld $ahi,$ba,$ahia
- fmuld $nhi,$na,$nhia
faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
fmuld $ahi,$bb,$ahib
- fmuld $nhi,$nb,$nhib
faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
fmuld $ahi,$bc,$ahic
- fmuld $nhi,$nc,$nhic
faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
fmuld $nhi,$nd,$nhid
- faddd $ahib,$nhib,$nhib
faddd $ahic,$nhic,$dota ! $nhic
faddd $ahid,$nhid,$dotb ! $nhid
fdtox $nlod,$nlod
std $nloa,[%sp+$bias+$frame+0]
+ add $j,8,$j
std $nlob,[%sp+$bias+$frame+8]
+ add $ap,$j,%o4
std $nloc,[%sp+$bias+$frame+16]
+ add $np,$j,%o5
std $nlod,[%sp+$bias+$frame+24]
+\f
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
ldx [%sp+$bias+$frame+0],%o0
+ fmuld $alo,$ba,$aloa
ldx [%sp+$bias+$frame+8],%o1
+ fmuld $nlo,$na,$nloa
ldx [%sp+$bias+$frame+16],%o2
+ fmuld $alo,$bb,$alob
ldx [%sp+$bias+$frame+24],%o3
+ fmuld $nlo,$nb,$nlob
srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$bc,$aloc
add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bd,$alod
add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
srlx %o2,16,%o7
+ fmuld $ahi,$ba,$ahia
add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
!and %o0,$mask,%o0
!and %o1,$mask,%o1
!and %o2,$mask,%o2
!or %o2,%o0,%o0
!or %o7,%o0,%o0 ! 64-bit result
srlx %o3,16,%g1 ! 34-bit carry
+ fmuld $ahi,$bb,$ahib
+
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $dota,$nloa,$nloa
+ faddd $dotb,$nlob,$nlob
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.L1stskip
+ std $nlod,[%sp+$bias+$frame+24]
\f
- ba .L1st
- add $j,8,$j
-.align 32
+.align 32,0x1000000
.L1st:
- add $ap,$j,%o3
- add $np,$j,%o4
- ld [%o3+`$bits==32 ? 0 : 4`],$alo_ ! load a[j] as pair of 32-bit words
+ add $ap,$j,%o4
+ add $np,$j,%o5
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
fzeros $alo
- ld [%o3+`$bits==32 ? 4 : 0`],$ahi_
+ ld [%o4+4],$ahi_
fzeros $ahi
- ld [%o4+`$bits==32 ? 0 : 4`],$nlo_ ! load n[j] as pair of 32-bit words
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
fzeros $nlo
- ld [%o4+`$bits==32 ? 4 : 0`],$nhi_
+ ld [%o5+4],$nhi_
fzeros $nhi
fxtod $alo,$alo
fxtod $nlo,$nlo
fxtod $nhi,$nhi
- std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ ldx [%sp+$bias+$frame+0],%o0
fmuld $alo,$ba,$aloa
- std $ahi,[$ap_h+$j]
+ ldx [%sp+$bias+$frame+8],%o1
fmuld $nlo,$na,$nloa
- std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ ldx [%sp+$bias+$frame+16],%o2
fmuld $alo,$bb,$alob
- std $nhi,[$np_h+$j]
+ ldx [%sp+$bias+$frame+24],%o3
fmuld $nlo,$nb,$nlob
+
+ srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
fmuld $alo,$bc,$aloc
+ add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
fmuld $nlo,$nc,$nloc
- faddd $aloa,$nloa,$nloa
+ srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
fmuld $alo,$bd,$alod
+ add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
fmuld $nlo,$nd,$nlod
- faddd $alob,$nlob,$nlob
+ srlx %o2,16,%o7
fmuld $ahi,$ba,$ahia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ faddd $aloc,$nloc,$nloc
fmuld $nhi,$na,$nhia
- faddd $aloc,$nloc,$nloc
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
fmuld $ahi,$bb,$ahib
+ sllx %o1,16,%o1
+ faddd $alod,$nlod,$nlod
fmuld $nhi,$nb,$nhib
- faddd $alod,$nlod,$nlod
+ sllx %o2,32,%o2
fmuld $ahi,$bc,$ahic
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahia,$nhia,$nhia
fmuld $nhi,$nc,$nhic
- faddd $ahia,$nhia,$nhia
+ or %o2,%o0,%o0
fmuld $ahi,$bd,$ahid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $ahib,$nhib,$nhib
fmuld $nhi,$nd,$nhid
- faddd $ahib,$nhib,$nhib
+ addcc %g1,%o0,%o0
+ faddd $dota,$nloa,$nloa
+ srlx %o3,16,%g1 ! 34-bit carry
+ faddd $dotb,$nlob,$nlob
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
- faddd $dota,$nloa,$nloa
- faddd $dotb,$nlob,$nlob
faddd $ahic,$nhic,$dota ! $nhic
faddd $ahid,$nhid,$dotb ! $nhid
std $nlob,[%sp+$bias+$frame+8]
std $nloc,[%sp+$bias+$frame+16]
std $nlod,[%sp+$bias+$frame+24]
+
+ addcc $j,8,$j
+ bnz,pt %icc,.L1st
+ add $tp,8,$tp
+\f
+.L1stskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
ldx [%sp+$bias+$frame+0],%o0
ldx [%sp+$bias+$frame+8],%o1
ldx [%sp+$bias+$frame+16],%o2
ldx [%sp+$bias+$frame+24],%o3
srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
srlx %o1,16,%o7
add %o7,%o2,%o2
srlx %o2,16,%o7
or %o1,%o0,%o0
or %o2,%o0,%o0
or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+32],%o4
addcc %g1,%o0,%o0
+ ldx [%sp+$bias+$frame+40],%o5
srlx %o3,16,%g1 ! 34-bit carry
bcs,a %xcc,.+8
add %g1,1,%g1
stx %o0,[$tp] ! tp[j-1]=
- addcc $j,8,$j
- bnz,pt %icc,.L1st
add $tp,8,$tp
-\f
- fdtox $dota,$dota
- fdtox $dotb,$dotb
- std $dota,[%sp+$bias+$frame+32]
- std $dotb,[%sp+$bias+$frame+40]
- ldx [%sp+$bias+$frame+32],%o0
- ldx [%sp+$bias+$frame+40],%o1
- srlx %o0,16,%o7
- add %o7,%o1,%o1
- and %o0,$mask,%o0
- sllx %o1,16,%o7
- or %o7,%o0,%o0
- addcc %g1,%o0,%o0
- srlx %o1,48,%g1
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
bcs,a %xcc,.+8
add %g1,1,%g1
mov %g1,$carry
- stx %o0,[$tp] ! tp[num-1]=
+ stx %o4,[$tp] ! tp[num-1]=
\f
ba .Louter
add $i,8,$i
.align 32
.Louter:
- sub %g0,$num,$j
+ sub %g0,$num,$j ! j=-num
add %sp,$bias+$frame+$locals,$tp
add $bp,$i,%o4
-___
-$code.=<<___ if ($bits==64);
+
ldx [$bp+$i],%o0 ! bp[i]
ldx [$ap+$j],%o1 ! ap[0]
-___
-$code.=<<___ if ($bits==32);
- ldd [$bp+$i],%o0 ! bp[i]
- ldd [$ap+$j],%g2 ! ap[0]
- sllx %o1,32,%o1
- sllx %g3,32,%g3
- or %o0,%o1,%o0
- or %g2,%g3,%o1
-___
-$code.=<<___;
+ sllx %o0,32,%g1
+ sllx %o1,32,%g5
+ srlx %o0,32,%o0
+ srlx %o1,32,%o1
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
ldx [$tp],%o2 ! tp[0]
mulx %o1,%o0,%o0
addcc %o2,%o0,%o0
mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
stx %o0,[%sp+$bias+$frame+0]
-
! transfer b[i] to FPU as 4x16-bit values
- ldda [%o4+`$bits==32 ? 2 : 6`]%asi,$ba
- ldda [%o4+`$bits==32 ? 0 : 4`]%asi,$bb
- ldda [%o4+`$bits==32 ? 6 : 2`]%asi,$bc
- ldda [%o4+`$bits==32 ? 4 : 0`]%asi,$bd
+ ldda [%o4+2]%asi,$ba
+ ldda [%o4+0]%asi,$bb
+ ldda [%o4+6]%asi,$bc
+ ldda [%o4+4]%asi,$bd
! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
ldda [%sp+$bias+$frame+6]%asi,$na
fmuld $alo,$bb,$alob
fmuld $nlo,$nb,$nlob
fmuld $alo,$bc,$aloc
- fmuld $nlo,$nc,$nloc
faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
fmuld $alo,$bd,$alod
- fmuld $nlo,$nd,$nlod
faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
fmuld $ahi,$ba,$ahia
- fmuld $nhi,$na,$nhia
faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
fmuld $ahi,$bb,$ahib
- fmuld $nhi,$nb,$nhib
faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
fmuld $ahi,$bc,$ahic
- fmuld $nhi,$nc,$nhic
faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
fmuld $nhi,$nd,$nhid
- faddd $ahib,$nhib,$nhib
faddd $ahic,$nhic,$dota ! $nhic
faddd $ahid,$nhid,$dotb ! $nhid
std $nloa,[%sp+$bias+$frame+0]
std $nlob,[%sp+$bias+$frame+8]
std $nloc,[%sp+$bias+$frame+16]
+ add $j,8,$j
std $nlod,[%sp+$bias+$frame+24]
+\f
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
ldx [%sp+$bias+$frame+8],%o1
+ fmuld $alo,$bd,$alod
ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
ldx [%sp+$bias+$frame+24],%o3
+ fmuld $ahi,$ba,$ahia
srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
add %o7,%o1,%o1
+ fmuld $ahi,$bb,$ahib
srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
add %o7,%o2,%o2
+ fmuld $ahi,$bc,$ahic
srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
! why?
and %o0,$mask,%o0
+ fmuld $ahi,$bd,$ahid
and %o1,$mask,%o1
and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
sllx %o3,48,%o7
or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
or %o7,%o0,%o0 ! 64-bit result
ldx [$tp],%o7
+ faddd $nloc,$nhia,$nloc
addcc %o7,%o0,%o0
! end-of-why?
+ faddd $nlod,$nhib,$nlod
srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
bcs,a %xcc,.+8
add %g1,1,%g1
+
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.Linnerskip
+ std $nlod,[%sp+$bias+$frame+24]
\f
ba .Linner
- add $j,8,$j
+ nop
.align 32
.Linner:
ldd [$ap_l+$j],$alo ! load a[j] in double format
fmuld $alo,$bb,$alob
fmuld $nlo,$nb,$nlob
fmuld $alo,$bc,$aloc
+ ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
fmuld $nlo,$nc,$nloc
- faddd $aloa,$nloa,$nloa
+ ldx [%sp+$bias+$frame+8],%o1
fmuld $alo,$bd,$alod
+ ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
fmuld $nlo,$nd,$nlod
- faddd $alob,$nlob,$nlob
+ ldx [%sp+$bias+$frame+24],%o3
fmuld $ahi,$ba,$ahia
+
+ srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
fmuld $nhi,$na,$nhia
- faddd $aloc,$nloc,$nloc
+ add %o7,%o1,%o1
fmuld $ahi,$bb,$ahib
+ srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
fmuld $nhi,$nb,$nhib
- faddd $alod,$nlod,$nlod
+ add %o7,%o2,%o2
fmuld $ahi,$bc,$ahic
+ srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
fmuld $nhi,$nc,$nhic
- faddd $ahia,$nhia,$nhia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
fmuld $ahi,$bd,$ahid
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
fmuld $nhi,$nd,$nhid
+ sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
+ sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
+ or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $nloc,$nhia,$nloc
+ addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
+ faddd $nlod,$nhib,$nlod
+ srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+ fdtox $nlob,$nlob
+ addcc %o7,%o0,%o0
+ fdtox $nloc,$nloc
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
- faddd $ahib,$nhib,$nhib
- faddd $dota,$nloa,$nloa
- faddd $dotb,$nlob,$nlob
- faddd $ahic,$nhic,$dota ! $nhic
- faddd $ahid,$nhid,$dotb ! $nhid
-
- faddd $nloc,$nhia,$nloc
- faddd $nlod,$nhib,$nlod
-
- fdtox $nloa,$nloa
- fdtox $nlob,$nlob
- fdtox $nloc,$nloc
- fdtox $nlod,$nlod
+ stx %o0,[$tp] ! tp[j-1]
+ fdtox $nlod,$nlod
std $nloa,[%sp+$bias+$frame+0]
std $nlob,[%sp+$bias+$frame+8]
std $nloc,[%sp+$bias+$frame+16]
+ addcc $j,8,$j
std $nlod,[%sp+$bias+$frame+24]
+ bnz,pt %icc,.Linner
+ add $tp,8,$tp
+\f
+.Linnerskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
ldx [%sp+$bias+$frame+0],%o0
ldx [%sp+$bias+$frame+8],%o1
ldx [%sp+$bias+$frame+16],%o2
ldx [%sp+$bias+$frame+24],%o3
srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
srlx %o1,16,%o7
add %o7,%o2,%o2
srlx %o2,16,%o7
sllx %o3,48,%o7
or %o1,%o0,%o0
or %o2,%o0,%o0
+ ldx [%sp+$bias+$frame+32],%o4
or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+40],%o5
addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
srlx %o3,16,%g1 ! 34-bit carry
bcs,a %xcc,.+8
add %g1,1,%g1
- ldx [$tp+8],%o7 ! tp[j]
addcc %o7,%o0,%o0
bcs,a %xcc,.+8
add %g1,1,%g1
stx %o0,[$tp] ! tp[j-1]
- addcc $j,8,$j
- bnz,pt %icc,.Linner
add $tp,8,$tp
-\f
- fdtox $dota,$dota
- fdtox $dotb,$dotb
- std $dota,[%sp+$bias+$frame+32]
- std $dotb,[%sp+$bias+$frame+40]
- ldx [%sp+$bias+$frame+32],%o0
- ldx [%sp+$bias+$frame+40],%o1
- srlx %o0,16,%o7
- add %o7,%o1,%o1
- and %o0,$mask,%o0
- sllx %o1,16,%o7
- or %o7,%o0,%o0
- addcc %g1,%o0,%o0
- srlx %o1,48,%g1
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
bcs,a %xcc,.+8
add %g1,1,%g1
- addcc $carry,%o0,%o0
- stx %o0,[$tp] ! tp[num-1]
+ addcc $carry,%o4,%o4
+ stx %o4,[$tp] ! tp[num-1]
mov %g1,$carry
bcs,a %xcc,.+8
add $carry,1,$carry
add $tp,8,$tp ! adjust tp to point at the end
ld [$tp-8],%o0
- ld [$np-`$bits==32 ? 4 : 8`],%o1
+ ld [$np-4],%o1
cmp %o0,%o1 ! compare topmost words
bcs,pt %icc,.Lcopy ! %icc.c is clean if not taken
nop
.Lsub:
ldd [$tp+%o7],%o0
ldd [$np+%o7],%o2
-___
-$code.=<<___ if ($bits==64);
- subccc %o1,%o3,%o3
- subccc %o0,%o2,%o2
-___
-$code.=<<___ if ($bits==32);
subccc %o1,%o2,%o2
subccc %o0,%o3,%o3
-___
-$code.=<<___;
std %o2,[$rp+%o7]
add %o7,8,%o7
brnz,pt %o7,.Lsub
nop
subccc $carry,0,$carry
bcc,pt %icc,.Lzap
- sub %g0,$num,%o7
+ sub %g0,$num,%o7 ! n=-num
.align 16,0x1000000
.Lcopy:
ldx [$tp+%o7],%o0
-___
-$code.=<<___ if ($bits==64);
- stx %o0,[$rp+%o7]
-___
-$code.=<<___ if ($bits==32);
srlx %o0,32,%o1
std %o0,[$rp+%o7]
-___
-$code.=<<___;
add %o7,8,%o7
brnz,pt %o7,.Lcopy
nop
ba .Lzap
- sub %g0,$num,%o7
+ sub %g0,$num,%o7 ! n=-num
.align 32
.Lzap:
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+# Below substitution makes it possible to compile without demanding
+# VIS extensions on command line, e.g. -xarch=v9 vs. -xarch=v9a. I
+# dare to do this, because VIS capability is detected at run-time now
+# and this routine is not called on CPUs not capable of executing it. Do
+# note that fzeros is not the only VIS dependency! Another dependency
+# is implicit and is just _a_ numerical value loaded to %asi register,
+# which assembler can't recognize as VIS specific...
+$code =~ s/fzeros\s+%f([0-9]+)/
+ sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)
+ /gem;
+
print $code;
+# flush
close STDOUT;