2 # Copyright 2012-2018 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the Apache License 2.0 (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
19 # SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
20 # onward. There are three new instructions used here: umulxhi,
21 # addxc[cc] and initializing store. On T3 RSA private key operations
22 # are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
23 # lengths. This is without dedicated squaring procedure. On T4
24 # corresponding coefficients are 1.47/2.10/2.80/2.90x, which is mostly
25 # for reference purposes, because T4 has dedicated Montgomery
26 # multiplication and squaring *instructions* that deliver even more.
# Perlasm convention: the last command-line argument, when present, names
# the file that receives the generated assembly; otherwise keep writing
# to the inherited STDOUT.  Three-arg open avoids mode injection via the
# file name, and a failed open is fatal instead of silently discarding
# all generated output.
if ($output = pop @ARGV) {
    open STDOUT, '>', $output or die "can't open $output: $!";
}

# STACK_FRAME is a macro supplied by sparc_arch.h in the emitted assembly.
$frame = "STACK_FRAME";
34 #include "sparc_arch.h"
37 .register %g2,#scratch
38 .register %g3,#scratch
41 .section ".text",#alloc,#execinstr
# Working registers for the Montgomery loops: the five usable globals
# %g1-%g5 plus the out registers %o0-%o5 and %o7 (%o6 is the stack
# pointer and therefore skipped).
($n0, $m0, $m1, $lo0, $hi0, $lo1, $hi1, $aj, $alo, $nj, $nlo, $tj) =
    ("%g1", "%g2", "%g3", "%g4", "%g5",
     "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o7");
# Incoming arguments of bn_mul_mont_vis3, still in the out registers
# (i.e. before the register-window save):
#   $rp  - BN_ULONG *rp             $ap  - const BN_ULONG *ap
#   $bp  - const BN_ULONG *bp       $np  - const BN_ULONG *np
#   $n0p - const BN_ULONG *n0       $num - int num (caller ensures even)
($rp, $ap, $bp, $np, $n0p, $num) = map { "%o$_" } 0 .. 5;
56 .globl bn_mul_mont_vis3
59 add %sp, $bias, %g4 ! real top of stack
60 sll $num, 2, $num ! size in bytes
62 andn %g5, 63, %g5 ! buffer size rounded up to 64 bytes
64 add %g5, %g1, %g1 ! 3*buffer size
66 andn %g1, 63, %g1 ! align at 64 byte
67 sub %g1, $frame, %g1 ! new top of stack
73 # +-------------------------------+<----- %sp
75 # +-------------------------------+<----- aligned at 64 bytes
77 # +-------------------------------+
80 # +-------------------------------+<----- aligned at 64 bytes
81 # | __int64 ap[1..0] | converted ap[]
82 # +-------------------------------+
83 # | __int64 np[1..0] | converted np[]
84 # +-------------------------------+
85 # | __int64 ap[3..2] |
88 # +-------------------------------+
# After the save instruction the caller's out registers have rotated
# into the in registers; remap the argument names accordingly, and claim
# the eight locals for temporaries, loop state, the tp pointer, the
# buffer size and the converted-a[]/n[] pointer.
($rp, $ap, $bp, $np, $n0p, $num) = map { "%i$_" } 0 .. 5;
($t0, $t1, $t2, $t3, $cnt, $tp, $bufsz, $anp) = map { "%l$_" } 0 .. 7;
93 ld [$n0p+0], $t0 ! pull n0[0..1] value
94 add %sp, $bias+$frame, $tp
97 ld [$bp+0], $t2 ! m0=bp[0]
103 ld [$ap+0], $t0 ! ap[0]
108 ld [$ap+8], $t2 ! ap[1]
113 stx $aj, [$anp] ! converted ap[0]
115 mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
116 umulxhi $aj, $m0, $hi0
118 ld [$np+0], $t0 ! np[0]
123 ld [$np+8], $t2 ! np[1]
128 stx $nj, [$anp+8] ! converted np[0]
130 mulx $lo0, $n0, $m1 ! "tp[0]"*n0
131 stx $aj, [$anp+16] ! converted ap[1]
133 mulx $aj, $m0, $alo ! ap[1]*bp[0]
134 umulxhi $aj, $m0, $aj ! ahi=aj
136 mulx $nj, $m1, $lo1 ! np[0]*m1
137 umulxhi $nj, $m1, $hi1
141 stx $nj, [$anp+24] ! converted np[1]
144 addcc $lo0, $lo1, $lo1
145 addxc %g0, $hi1, $hi1
147 mulx $nj, $m1, $nlo ! np[1]*m1
148 umulxhi $nj, $m1, $nj ! nhi=nj
151 sub $num, 24, $cnt ! cnt=num-3
155 ld [$ap+0], $t0 ! ap[j]
156 addcc $alo, $hi0, $lo0
163 stx $aj, [$anp] ! converted ap[j]
165 ld [$np+0], $t2 ! np[j]
166 addcc $nlo, $hi1, $lo1
168 addxc $nj, %g0, $hi1 ! nhi=nj
172 mulx $aj, $m0, $alo ! ap[j]*bp[0]
174 umulxhi $aj, $m0, $aj ! ahi=aj
175 stx $nj, [$anp+8] ! converted np[j]
176 add $anp, 16, $anp ! anp++
178 mulx $nj, $m1, $nlo ! np[j]*m1
179 addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
180 umulxhi $nj, $m1, $nj ! nhi=nj
181 addxc %g0, $hi1, $hi1
182 stx $lo1, [$tp] ! tp[j-1]
183 add $tp, 8, $tp ! tp++
186 sub $cnt, 8, $cnt ! j--
188 addcc $alo, $hi0, $lo0
189 addxc $aj, %g0, $hi0 ! ahi=aj
191 addcc $nlo, $hi1, $lo1
193 addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
194 addxc %g0, $hi1, $hi1
195 stx $lo1, [$tp] ! tp[j-1]
198 addcc $hi0, $hi1, $hi1
199 addxc %g0, %g0, $ovf ! upmost overflow bit
204 sub $num, 16, $i ! i=num-2
208 ld [$bp+0], $t2 ! m0=bp[i]
211 sub $anp, $num, $anp ! rewind
217 ldx [$anp+0], $aj ! ap[0]
219 ldx [$anp+8], $nj ! np[0]
221 mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
222 ldx [$tp], $tj ! tp[0]
223 umulxhi $aj, $m0, $hi0
224 ldx [$anp+16], $aj ! ap[1]
225 addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
226 mulx $aj, $m0, $alo ! ap[1]*bp[i]
227 addxc %g0, $hi0, $hi0
228 mulx $lo0, $n0, $m1 ! tp[0]*n0
229 umulxhi $aj, $m0, $aj ! ahi=aj
230 mulx $nj, $m1, $lo1 ! np[0]*m1
231 umulxhi $nj, $m1, $hi1
232 ldx [$anp+24], $nj ! np[1]
234 addcc $lo1, $lo0, $lo1
235 mulx $nj, $m1, $nlo ! np[1]*m1
236 addxc %g0, $hi1, $hi1
237 umulxhi $nj, $m1, $nj ! nhi=nj
240 sub $num, 24, $cnt ! cnt=num-3
243 addcc $alo, $hi0, $lo0
244 ldx [$tp+8], $tj ! tp[j]
245 addxc $aj, %g0, $hi0 ! ahi=aj
246 ldx [$anp+0], $aj ! ap[j]
247 addcc $nlo, $hi1, $lo1
248 mulx $aj, $m0, $alo ! ap[j]*bp[i]
249 addxc $nj, %g0, $hi1 ! nhi=nj
250 ldx [$anp+8], $nj ! np[j]
252 umulxhi $aj, $m0, $aj ! ahi=aj
253 addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
254 mulx $nj, $m1, $nlo ! np[j]*m1
255 addxc %g0, $hi0, $hi0
256 umulxhi $nj, $m1, $nj ! nhi=nj
257 addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
258 addxc %g0, $hi1, $hi1
259 stx $lo1, [$tp] ! tp[j-1]
261 brnz,pt $cnt, .Linner
264 ldx [$tp+8], $tj ! tp[j]
265 addcc $alo, $hi0, $lo0
266 addxc $aj, %g0, $hi0 ! ahi=aj
267 addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
268 addxc %g0, $hi0, $hi0
270 addcc $nlo, $hi1, $lo1
271 addxc $nj, %g0, $hi1 ! nhi=nj
272 addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
273 addxc %g0, $hi1, $hi1
274 stx $lo1, [$tp] ! tp[j-1]
276 subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
277 addxccc $hi1, $hi0, $hi1
285 sub $anp, $num, $anp ! rewind
289 subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
297 subccc $tj, $nj, $t2 ! tp[j]-np[j]
302 st $t2, [$rp-4] ! reverse order
307 sub $anp, $num, $anp ! rewind
312 subccc $ovf, %g0, $ovf ! handle upmost overflow bit
317 .Lcopy: ! conditional copy
324 stx %g0, [$anp] ! zap
329 st $t3, [$rp+0] ! flip order
338 .type bn_mul_mont_vis3, #function
339 .size bn_mul_mont_vis3, .-bn_mul_mont_vis3
340 .asciz "Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by <appro\@openssl.org>"
344 # Purpose of these subroutines is to explicitly encode VIS instructions,
345 # so that one can compile the module without having to specify VIS
346 # extensions on compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
# The idea is to keep open the option of producing a "universal" binary,
# letting the programmer detect at run-time whether the current CPU is
# VIS-capable.
350 my ($mnemonic,$rs1,$rs2,$rd)=@_;
351 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
353 my %visopf = ( "addxc" => 0x011,
355 "umulxhi" => 0x016 );
357 $ref = "$mnemonic\t$rs1,$rs2,$rd";
359 if ($opf=$visopf{$mnemonic}) {
360 foreach ($rs1,$rs2,$rd) {
361 return $ref if (!/%([goli])([0-9])/);
365 return sprintf ".word\t0x%08x !%s",
366 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
373 foreach (split("\n",$code)) {
374 s/\`([^\`]*)\`/eval $1/ge;
376 s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/