crypto/bn/asm/sparcv9-mont.pl
#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# December 2005
#
# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The
# reasons for undertaking the effort are multiple. First of all,
# UltraSPARC is not the whole SPARCv9 universe and other VIS-free
# implementations deserve optimized code just as much. Secondly, the
# newly introduced UltraSPARC T1, a.k.a. Niagara, has a shared FPU,
# and concurrent FPU-intensive paths, such as sparcv9a-mont, would
# simply sink it. Yes, T1 is equipped with several integrated RSA/DSA
# accelerator circuits accessible through a kernel driver [only(*)],
# but having a decent user-land software implementation is important
# too. Finally, there was the desire to experiment with a dedicated
# squaring procedure. Yes, this module implements one, because it was
# easiest to draft in SPARCv9 instructions...

# (*)   An engine accessing the driver in question is on my TODO list.
#       For reference, the accelerator is estimated to give a 6 to 10
#       times improvement on single-threaded RSA sign. It should be
#       noted that a 6-10x improvement coefficient does not actually
#       mean anything extraordinary in terms of absolute
#       [single-threaded] performance, as the SPARCv9 instruction set
#       is by all means the least suitable for high-performance crypto
#       among 64-bit platforms. The 6-10x factor simply places T1 in
#       the same performance domain as, say, AMD64 and IA-64. The
#       improvement in RSA verify doesn't appear impressive at all,
#       but it's the sign operation which is far more
#       critical/interesting.

# You might notice that the inner loops are modulo-scheduled:-) This
# has essentially negligible impact on UltraSPARC performance; it's
# Fujitsu SPARC64 V users who should notice and hopefully appreciate
# the advantage... Currently this module surpasses sparcv9a-mont.pl by
# ~20% on UltraSPARC-III and later cores, but recall that the sparcv9a
# module still has hidden potential [see the TODO list there], which
# is estimated to be larger than 20%...

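# For readers unfamiliar with the term: modulo scheduling, a.k.a.
# software pipelining, starts iteration j's independent work (the
# multiplies) while iteration j-1's dependent work (the additions and
# stores) is still completing, which is why the loops below carry
# explicit !prologue! and !epilogue! pieces that prime and drain the
# pipeline. Purely as an illustration, and not part of this module, a
# minimal C sketch of that shape (the two-stage split and the name
# pipelined_sum are made up for this example; n is assumed >= 1):
#
#   #include <stdint.h>
#
#   static uint64_t pipelined_sum(const uint32_t *a, uint32_t m, int n)
#   {
#       uint64_t acc  = 0;
#       uint64_t prod = (uint64_t)a[0] * m;     /* prologue: stage A, j=0   */
#       for (int j = 1; j < n; j++) {
#           uint64_t next = (uint64_t)a[j] * m; /* stage A, iteration j     */
#           acc += prod;                        /* stage B, iteration j-1   */
#           prod = next;
#       }
#       acc += prod;                            /* epilogue: stage B, j=n-1 */
#       return acc;
#   }
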
# int bn_mul_mont(
$rp="%i0";      # BN_ULONG *rp,
$ap="%i1";      # const BN_ULONG *ap,
$bp="%i2";      # const BN_ULONG *bp,
$np="%i3";      # const BN_ULONG *np,
$n0="%i4";      # const BN_ULONG *n0,
$num="%i5";     # int num);

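# The routine follows the standard word-by-word Montgomery
# multiplication, rp[] = ap[]*bp[]*2^(-32*num) mod np[], with
# n0[0] = -np[0]^(-1) mod 2^32. Purely for reference, and not used by
# this generator, a minimal C99 sketch of that algorithm under the
# same interface, assuming 32-bit BN_ULONGs (the name mont_mul_ref and
# the heap-allocated scratch array are made up for this sketch; the
# assembly keeps t[] on the stack instead):
#
#   #include <stdint.h>
#   #include <stdlib.h>
#   #include <string.h>
#
#   static int mont_mul_ref(uint32_t *rp, const uint32_t *ap,
#                           const uint32_t *bp, const uint32_t *np,
#                           const uint32_t *n0, int num)
#   {
#       if (num < 4) return 0;                      /* 128 bits minimum */
#       uint32_t *tp = calloc(num + 2, sizeof(*tp));/* t[0..num+1] = 0  */
#       if (tp == NULL) return 0;
#
#       for (int i = 0; i < num; i++) {
#           uint64_t t; uint32_t c, m;
#
#           c = 0;                                  /* t[] += ap[]*bp[i] */
#           for (int j = 0; j < num; j++) {
#               t = (uint64_t)ap[j] * bp[i] + tp[j] + c;
#               tp[j] = (uint32_t)t; c = (uint32_t)(t >> 32);
#           }
#           t = (uint64_t)tp[num] + c;
#           tp[num] = (uint32_t)t; tp[num + 1] = (uint32_t)(t >> 32);
#
#           m = tp[0] * n0[0];                      /* Montgomery factor */
#           t = (uint64_t)np[0] * m + tp[0];        /* t[] += np[]*m,    */
#           c = (uint32_t)(t >> 32);                /* then shift t[]    */
#           for (int j = 1; j < num; j++) {         /* right one word    */
#               t = (uint64_t)np[j] * m + tp[j] + c;
#               tp[j - 1] = (uint32_t)t; c = (uint32_t)(t >> 32);
#           }
#           t = (uint64_t)tp[num] + c;
#           tp[num - 1] = (uint32_t)t;
#           tp[num] = tp[num + 1] + (uint32_t)(t >> 32);
#       }
#
#       uint64_t d, b = 0;                          /* rp = t - np ...   */
#       for (int j = 0; j < num; j++) {
#           d = (uint64_t)tp[j] - np[j] - b;
#           rp[j] = (uint32_t)d; b = (d >> 32) & 1;
#       }
#       if (tp[num] == 0 && b)                      /* ... unless t < np */
#           memcpy(rp, tp, num * sizeof(*rp));
#       free(tp);
#       return 1;
#   }
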
$bits=32;
for (@ARGV)     { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64)  { $bias=2047; $frame=192; }
else            { $bias=0;    $frame=128; }

$car0="%o0";
$car1="%o1";
$car2="%o2";    # 1 bit
$acc0="%o3";
$acc1="%o4";
$mask="%g1";    # 32 bits, what a waste...
$tmp0="%g4";
$tmp1="%g5";

$i="%l0";
$j="%l1";
$mul0="%l2";
$mul1="%l3";
$tp="%l4";
$apj="%l5";
$npj="%l6";
$tpj="%l7";

$fname="bn_mul_mont_int";

$code=<<___;
.section        ".text",#alloc,#execinstr

.global $fname
.align  32
$fname:
        cmp     %o5,4                   ! 128 bits minimum
        bge,pt  %icc,.Lenter
        sethi   %hi(0xffffffff),$mask
        retl
        clr     %o0
.align  32
.Lenter:
        save    %sp,-$frame,%sp
        sll     $num,2,$num             ! num*=4
        or      $mask,%lo(0xffffffff),$mask
        ld      [$n0],$n0
        cmp     $ap,$bp
        and     $num,$mask,$num
        ld      [$bp],$mul0             ! bp[0]
        nop

        add     %sp,$bias,%o7           ! real top of stack
        ld      [$ap],$car0             ! ap[0] ! redundant in squaring context
        sub     %o7,$num,%o7
        ld      [$ap+4],$apj            ! ap[1]
        and     %o7,-1024,%o7
        ld      [$np],$car1             ! np[0]
        sub     %o7,$bias,%sp           ! alloca
        ld      [$np+4],$npj            ! np[1]
        be,pt   `$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont
        mov     12,$j

        mulx    $car0,$mul0,$car0       ! ap[0]*bp[0]
        mulx    $apj,$mul0,$tmp0        !prologue! ap[1]*bp[0]
        and     $car0,$mask,$acc0
        add     %sp,$bias+$frame,$tp
        ld      [$ap+8],$apj            !prologue!

        mulx    $n0,$acc0,$mul1         ! "t[0]"*n0
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1       ! np[0]*"t[0]"*n0
        mulx    $npj,$mul1,$acc1        !prologue! np[1]*"t[0]"*n0
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        ld      [$np+8],$npj            !prologue!
        srlx    $car1,32,$car1
        mov     $tmp0,$acc0             !prologue!

.L1st:
        mulx    $apj,$mul0,$tmp0
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0
        ld      [$ap+$j],$apj           ! ap[j]
        and     $car0,$mask,$acc0
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj           ! np[j]
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        add     $j,4,$j                 ! j++
        mov     $tmp0,$acc0
        st      $car1,[$tp]
        cmp     $j,$num
        mov     $tmp1,$acc1
        srlx    $car1,32,$car1
        bl      %icc,.L1st
        add     $tp,4,$tp               ! tp++
!.L1st

        mulx    $apj,$mul0,$tmp0        !epilogue!
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0
        and     $car0,$mask,$acc0
        add     $acc1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $tmp0,$car0,$car0
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car1

        add     $car0,$car1,$car1
        st      $car1,[$tp+8]
        srlx    $car1,32,$car2

        mov     4,$i                    ! i++
        ld      [$bp+4],$mul0           ! bp[1]
.Louter:
        add     %sp,$bias+$frame,$tp
        ld      [$ap],$car0             ! ap[0]
        ld      [$ap+4],$apj            ! ap[1]
        ld      [$np],$car1             ! np[0]
        ld      [$np+4],$npj            ! np[1]
        ld      [$tp],$tmp1             ! tp[0]
        ld      [$tp+4],$tpj            ! tp[1]
        mov     12,$j

        mulx    $car0,$mul0,$car0
        mulx    $apj,$mul0,$tmp0        !prologue!
        add     $tmp1,$car0,$car0
        ld      [$ap+8],$apj            !prologue!
        and     $car0,$mask,$acc0

        mulx    $n0,$acc0,$mul1
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1
        mulx    $npj,$mul1,$acc1        !prologue!
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        ld      [$np+8],$npj            !prologue!
        srlx    $car1,32,$car1
        mov     $tmp0,$acc0             !prologue!

.Linner:
        mulx    $apj,$mul0,$tmp0
        mulx    $npj,$mul1,$tmp1
        add     $tpj,$car0,$car0
        ld      [$ap+$j],$apj           ! ap[j]
        add     $acc0,$car0,$car0
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj           ! np[j]
        and     $car0,$mask,$acc0
        ld      [$tp+8],$tpj            ! tp[j]
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        add     $j,4,$j                 ! j++
        mov     $tmp0,$acc0
        st      $car1,[$tp]             ! tp[j-1]
        srlx    $car1,32,$car1
        mov     $tmp1,$acc1
        cmp     $j,$num
        bl      %icc,.Linner
        add     $tp,4,$tp               ! tp++
!.Linner

        mulx    $apj,$mul0,$tmp0        !epilogue!
        mulx    $npj,$mul1,$tmp1
        add     $tpj,$car0,$car0
        add     $acc0,$car0,$car0
        ld      [$tp+8],$tpj            ! tp[j]
        and     $car0,$mask,$acc0
        add     $acc1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]             ! tp[j-1]
        srlx    $car1,32,$car1

        add     $tpj,$car0,$car0
        add     $tmp0,$car0,$car0
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        add     $acc0,$car1,$car1
        st      $car1,[$tp+4]           ! tp[j-1]
        srlx    $car0,32,$car0
        add     $i,4,$i                 ! i++
        srlx    $car1,32,$car1

        add     $car0,$car1,$car1
        cmp     $i,$num
        add     $car2,$car1,$car1
        st      $car1,[$tp+8]

        srlx    $car1,32,$car2
        bl,a    %icc,.Louter
        ld      [$bp+$i],$mul0          ! bp[i]
!.Louter

        add     $tp,12,$tp

.Ltail:
        add     $np,$num,$np
        add     $rp,$num,$rp
        mov     $tp,$ap
        sub     %g0,$num,%o7            ! k=-num

        srl     $npj,30,%o0             ! boundary condition...
        brz,pn  %o0,.Lcopy              ! ... is met
        subcc   %g0,%g0,%g0             ! clear %icc.c

.align  16,0x1000000
.Lsub:
        ld      [$tp+%o7],%o0
        ld      [$np+%o7],%o1
        subccc  %o0,%o1,%o1             ! tp[j]-np[j]
        st      %o1,[$rp+%o7]
        add     %o7,4,%o7
        brnz    %o7,.Lsub
        nop
        subc    $car2,0,$car2           ! handle upmost overflow bit
        and     $tp,$car2,$ap
        andn    $rp,$car2,$np
        or      $ap,$np,$ap
        sub     %g0,$num,%o7

.align  16,0x1000000
.Lcopy:
        ld      [$ap+%o7],%o0           ! copy or in-place refresh
        st      %g0,[$tp+%o7]           ! zap tp
        st      %o0,[$rp+%o7]
        add     %o7,4,%o7
        brnz    %o7,.Lcopy
        nop
        mov     1,%i0
        ret
        restore
___

########
######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
######## code without the dedicated squaring procedure that follows.
########
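# The saving comes from the usual squaring identity: in
# (sum_i a_i*2^(32*i))^2 every cross product a_i*a_j with i!=j occurs
# twice, so it is computed once and doubled, which is what the $sbit
# shuffling below does on the fly while the result is folded into the
# Montgomery reduction. Purely as an illustration, and not used by
# this generator, a minimal C99 sketch of plain squaring with doubled
# cross terms, assuming 32-bit words and r[] of 2*num words (the name
# sqr_ref is made up for this sketch):
#
#   #include <stdint.h>
#   #include <string.h>
#
#   static void sqr_ref(uint32_t *r, const uint32_t *a, int num)
#   {
#       uint64_t t; uint32_t c;
#       memset(r, 0, 2 * num * sizeof(*r));
#
#       for (int i = 0; i < num; i++) {    /* cross products, once */
#           c = 0;
#           for (int j = i + 1; j < num; j++) {
#               t = (uint64_t)a[i] * a[j] + r[i + j] + c;
#               r[i + j] = (uint32_t)t; c = (uint32_t)(t >> 32);
#           }
#           r[i + num] = c;
#       }
#
#       c = 0;                             /* ... then doubled */
#       for (int k = 0; k < 2 * num; k++) {
#           t = ((uint64_t)r[k] << 1) + c;
#           r[k] = (uint32_t)t; c = (uint32_t)(t >> 32);
#       }
#
#       c = 0;                             /* ... plus the diagonal a[i]^2 */
#       for (int i = 0; i < num; i++) {
#           t = (uint64_t)a[i] * a[i] + r[2 * i] + c;
#           r[2 * i] = (uint32_t)t;
#           t = (uint64_t)r[2 * i + 1] + (t >> 32);
#           r[2 * i + 1] = (uint32_t)t; c = (uint32_t)(t >> 32);
#       }
#   }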
$sbit="%i2";            # re-use $bp!

$code.=<<___;
.align  32
.Lbn_sqr_mont:
        mulx    $mul0,$mul0,$car0               ! ap[0]*ap[0]
        mulx    $apj,$mul0,$tmp0                !prologue!
        and     $car0,$mask,$acc0
        add     %sp,$bias+$frame,$tp
        ld      [$ap+8],$apj                    !prologue!

        mulx    $n0,$acc0,$mul1                 ! "t[0]"*n0
        srlx    $car0,32,$car0
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1               ! np[0]*"t[0]"*n0
        mulx    $npj,$mul1,$acc1                !prologue!
        and     $car0,1,$sbit
        ld      [$np+8],$npj                    !prologue!
        srlx    $car0,1,$car0
        add     $acc0,$car1,$car1
        srlx    $car1,32,$car1
        mov     $tmp0,$acc0                     !prologue!

.Lsqr_1st:
        mulx    $apj,$mul0,$tmp0
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0               ! ap[j]*a0+c0
        add     $acc1,$car1,$car1
        ld      [$ap+$j],$apj                   ! ap[j]
        and     $car0,$mask,$acc0
        ld      [$np+$j],$npj                   ! np[j]
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        mov     $tmp1,$acc1
        srlx    $acc0,32,$sbit
        add     $j,4,$j                         ! j++
        and     $acc0,$mask,$acc0
        cmp     $j,$num
        add     $acc0,$car1,$car1
        st      $car1,[$tp]
        mov     $tmp0,$acc0
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_1st
        add     $tp,4,$tp                       ! tp++
!.Lsqr_1st

        mulx    $apj,$mul0,$tmp0                ! epilogue
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0               ! ap[j]*a0+c0
        add     $acc1,$car1,$car1
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $tmp0,$car0,$car0               ! ap[j]*a0+c0
        add     $tmp1,$car1,$car1
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        st      $car1,[$tp+8]
        srlx    $car1,32,$car2

        ld      [%sp+$bias+$frame],$tmp0        ! tp[0]
        ld      [%sp+$bias+$frame+4],$tmp1      ! tp[1]
        ld      [%sp+$bias+$frame+8],$tpj       ! tp[2]
        ld      [$ap+4],$mul0                   ! ap[1]
        ld      [$ap+8],$apj                    ! ap[2]
        ld      [$np],$car1                     ! np[0]
        ld      [$np+4],$npj                    ! np[1]
        mulx    $n0,$tmp0,$mul1

        mulx    $mul0,$mul0,$car0
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1
        mulx    $npj,$mul1,$acc1
        add     $tmp0,$car1,$car1
        and     $car0,$mask,$acc0
        ld      [$np+8],$npj                    ! np[2]
        srlx    $car1,32,$car1
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        and     $car0,1,$sbit
        add     $acc1,$car1,$car1
        srlx    $car0,1,$car0
        mov     12,$j
        st      $car1,[%sp+$bias+$frame]        ! tp[0]=
        srlx    $car1,32,$car1
        add     %sp,$bias+$frame+4,$tp

.Lsqr_2nd:
        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $acc0,$car0,$car0
        add     $tpj,$car1,$car1
        ld      [$ap+$j],$apj                   ! ap[j]
        and     $car0,$mask,$acc0
        ld      [$np+$j],$npj                   ! np[j]
        srlx    $car0,32,$car0
        add     $acc1,$car1,$car1
        ld      [$tp+8],$tpj                    ! tp[j]
        add     $acc0,$acc0,$acc0
        add     $j,4,$j                         ! j++
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        cmp     $j,$num
        add     $acc0,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_2nd
        add     $tp,4,$tp                       ! tp++
!.Lsqr_2nd

        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $acc0,$car0,$car0
        add     $tpj,$car1,$car1
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc1,$car1,$car1
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        add     $car2,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car2

        ld      [%sp+$bias+$frame],$tmp1        ! tp[0]
        ld      [%sp+$bias+$frame+4],$tpj       ! tp[1]
        ld      [$ap+8],$mul0                   ! ap[2]
        ld      [$np],$car1                     ! np[0]
        ld      [$np+4],$npj                    ! np[1]
        mulx    $n0,$tmp1,$mul1
        and     $mul1,$mask,$mul1
        mov     8,$i

        mulx    $mul0,$mul0,$car0
        mulx    $car1,$mul1,$car1
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     %sp,$bias+$frame,$tp
        srlx    $car1,32,$car1
        and     $car0,1,$sbit
        srlx    $car0,1,$car0
        mov     4,$j

.Lsqr_outer:
.Lsqr_inner1:
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $j,4,$j
        ld      [$tp+8],$tpj
        cmp     $j,$i
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj
        st      $car1,[$tp]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_inner1
        add     $tp,4,$tp
!.Lsqr_inner1

        add     $j,4,$j
        ld      [$ap+$j],$apj                   ! ap[j]
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        ld      [$np+$j],$npj                   ! np[j]
        add     $acc0,$car1,$car1
        ld      [$tp+8],$tpj                    ! tp[j]
        add     $acc1,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $j,4,$j
        cmp     $j,$num
        be,pn   %icc,.Lsqr_no_inner2
        add     $tp,4,$tp

.Lsqr_inner2:
        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $acc0,$car0,$car0
        ld      [$ap+$j],$apj                   ! ap[j]
        and     $car0,$mask,$acc0
        ld      [$np+$j],$npj                   ! np[j]
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        ld      [$tp+8],$tpj                    ! tp[j]
        or      $sbit,$acc0,$acc0
        add     $j,4,$j                         ! j++
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        cmp     $j,$num
        add     $acc0,$car1,$car1
        add     $acc1,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_inner2
        add     $tp,4,$tp                       ! tp++

.Lsqr_no_inner2:
        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $acc0,$car0,$car0
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        add     $acc1,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        add     $car2,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car2

        add     $i,4,$i                         ! i++
        ld      [%sp+$bias+$frame],$tmp1        ! tp[0]
        ld      [%sp+$bias+$frame+4],$tpj       ! tp[1]
        ld      [$ap+$i],$mul0                  ! ap[j]
        ld      [$np],$car1                     ! np[0]
        ld      [$np+4],$npj                    ! np[1]
        mulx    $n0,$tmp1,$mul1
        and     $mul1,$mask,$mul1
        add     $i,4,$tmp0

        mulx    $mul0,$mul0,$car0
        mulx    $car1,$mul1,$car1
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     %sp,$bias+$frame,$tp
        srlx    $car1,32,$car1
        and     $car0,1,$sbit
        srlx    $car0,1,$car0

        cmp     $tmp0,$num                      ! i<num-1
        bl      %icc,.Lsqr_outer
        mov     4,$j

.Lsqr_last:
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $j,4,$j
        ld      [$tp+8],$tpj
        cmp     $j,$i
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj
        st      $car1,[$tp]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_last
        add     $tp,4,$tp
!.Lsqr_last

        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $acc0,$car1,$car1
        add     $acc1,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0               ! recover $car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        add     $car2,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car2

        ba      .Ltail
        add     $tp,8,$tp
.type   $fname,#function
.size   $fname,(.-$fname)
.asciz  "Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
print $code;
close STDOUT;