#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# December 2005
#
# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons
# for undertaking this effort are multiple. First of all, UltraSPARC is
# not the whole SPARCv9 universe and other VIS-free implementations
# deserve optimized code just as much. Secondly, the newly introduced
# UltraSPARC T1, a.k.a. Niagara, has a shared FPU, and concurrent
# FPU-intensive paths, such as sparcv9a-mont, will simply sink it. Yes,
# T1 is equipped with several integrated RSA/DSA accelerator circuits
# accessible through a kernel driver [only(*)], but having a decent
# user-land software implementation is important too. Finally, there
# was a desire to experiment with a dedicated squaring procedure. Yes,
# this module implements one, because it was easiest to draft it in
# SPARCv9 instructions...

# (*)   An engine accessing the driver in question is on my TODO list.
#       For reference, the accelerator is estimated to give a 6 to 10
#       times improvement on single-threaded RSA sign. It should be
#       noted that a 6-10x improvement coefficient does not actually
#       mean something extraordinary in terms of absolute
#       [single-threaded] performance, as the SPARCv9 instruction set
#       is by all means the least suitable for high-performance crypto
#       among 64-bit platforms. The 6-10x factor simply places T1 in
#       the same performance domain as, say, AMD64 and IA-64. The
#       improvement of RSA verify doesn't appear impressive at all,
#       but it's the sign operation which is far more
#       critical/interesting.

# You might notice that the inner loops are modulo-scheduled:-) This
# has essentially negligible impact on UltraSPARC performance; it's
# Fujitsu SPARC64 V users who should notice and hopefully appreciate
# the advantage... Currently this module surpasses sparcv9a-mont.pl
# by ~20% on UltraSPARC-III and later cores, but recall that the
# sparcv9a module still has hidden potential [see TODO list there],
# which is estimated to be larger than 20%...

# int bn_mul_mont(
$rp="%i0";      # BN_ULONG *rp,
$ap="%i1";      # const BN_ULONG *ap,
$bp="%i2";      # const BN_ULONG *bp,
$np="%i3";      # const BN_ULONG *np,
$n0="%i4";      # const BN_ULONG *n0,
$num="%i5";     # int num);

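# Editorial note: the sketch below is not part of the original module; it is
# a hedged reference model of the value the routine computes, namely the
# Montgomery product ap*bp*R^-1 mod np with R = 2^(32*num), written with
# Math::BigInt for clarity. The name mont_mul_ref is made up for
# illustration and is never called by this script.
use Math::BigInt;

sub mont_mul_ref {
    my ($x,$y,$n,$num) = @_;                            # Math::BigInt operands, word count
    my $R    = Math::BigInt->new(2)->bpow(32*$num);     # R = 2^(32*num)
    my $Rinv = $R->copy()->bmodinv($n);                 # R^-1 mod n
    return $x->copy()->bmul($y)->bmul($Rinv)->bmod($n); # x*y*R^-1 mod n
}
# The real routine reaches the same result word by word with the reduction
# interleaved, which is what the assembly emitted below implements.
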
$bits=32;
for (@ARGV)     { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64)  { $bias=2047; $frame=192; }
else            { $bias=0;    $frame=128; }
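# Editorial note: in the 64-bit SPARC V9 ABI the stack pointer is kept
# biased by 2047 bytes relative to the memory it actually addresses, and the
# 16-register window save area doubles in size, hence the larger frame.
# Wherever the code below derives a real address from %sp it adds the bias
# back in (see the "add %sp,$bias,..." / "sub ...,$bias,%sp" pairs).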

$car0="%o0";
$car1="%o1";
$car2="%o2";    # 1 bit
$acc0="%o3";
$acc1="%o4";
$mask="%g1";    # 32 bits, what a waste...
$tmp0="%g4";
$tmp1="%g5";

$i="%l0";
$j="%l1";
$mul0="%l2";
$mul1="%l3";
$tp="%l4";
$apj="%l5";
$npj="%l6";
$tpj="%l7";

$fname="bn_mul_mont_int";

$code=<<___;
.section        ".text",#alloc,#execinstr

.global $fname
.align  32
$fname:
        cmp     %o5,4                   ! 128 bits minimum
        bge,pt  %icc,.Lenter
        sethi   %hi(0xffffffff),$mask
        retl
        clr     %o0
.align  32
.Lenter:
        save    %sp,-$frame,%sp
        sll     $num,2,$num             ! num*=4
        or      $mask,%lo(0xffffffff),$mask
        ld      [$n0],$n0
        cmp     $ap,$bp
        and     $num,$mask,$num
        ld      [$bp],$mul0             ! bp[0]
        nop

        add     %sp,$bias,%o7           ! real top of stack
        ld      [$ap],$car0             ! ap[0] ! redundant in squaring context
        sub     %o7,$num,%o7
        ld      [$ap+4],$apj            ! ap[1]
        and     %o7,-1024,%o7
        ld      [$np],$car1             ! np[0]
        sub     %o7,$bias,%sp           ! alloca
        ld      [$np+4],$npj            ! np[1]
        be,pt   `$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont
        mov     12,$j

        mulx    $car0,$mul0,$car0       ! ap[0]*bp[0]
        mulx    $apj,$mul0,$tmp0        !prologue! ap[1]*bp[0]
        and     $car0,$mask,$acc0
        add     %sp,$bias+$frame,$tp
        ld      [$ap+8],$apj            !prologue!

        mulx    $n0,$acc0,$mul1         ! "t[0]"*n0
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1       ! np[0]*"t[0]"*n0
        mulx    $npj,$mul1,$acc1        !prologue! np[1]*"t[0]"*n0
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        ld      [$np+8],$npj            !prologue!
        srlx    $car1,32,$car1
        mov     $tmp0,$acc0             !prologue!

.L1st:
        mulx    $apj,$mul0,$tmp0
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0
        ld      [$ap+$j],$apj           ! ap[j]
        and     $car0,$mask,$acc0
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj           ! np[j]
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        add     $j,4,$j                 ! j++
        mov     $tmp0,$acc0
        st      $car1,[$tp]
        cmp     $j,$num
        mov     $tmp1,$acc1
        srlx    $car1,32,$car1
        bl      %icc,.L1st
        add     $tp,4,$tp               ! tp++
!.L1st

        mulx    $apj,$mul0,$tmp0        !epilogue!
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0
        and     $car0,$mask,$acc0
        add     $acc1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $tmp0,$car0,$car0
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car1

        add     $car0,$car1,$car1
        st      $car1,[$tp+8]
        srlx    $car1,32,$car2
\f
        mov     4,$i                    ! i++
        ld      [$bp+4],$mul0           ! bp[1]
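! Editorial note (descriptive comment, not part of the original module): the
! outer loop below handles one word of bp[] per iteration, adding ap[] times
! that word plus the Montgomery reduction term into the temporary vector
! tp[] kept on the stack.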
.Louter:
        add     %sp,$bias+$frame,$tp
        ld      [$ap],$car0             ! ap[0]
        ld      [$ap+4],$apj            ! ap[1]
        ld      [$np],$car1             ! np[0]
        ld      [$np+4],$npj            ! np[1]
        ld      [$tp],$tmp1             ! tp[0]
        ld      [$tp+4],$tpj            ! tp[1]
        mov     12,$j

        mulx    $car0,$mul0,$car0
        mulx    $apj,$mul0,$tmp0        !prologue!
        add     $tmp1,$car0,$car0
        ld      [$ap+8],$apj            !prologue!
        and     $car0,$mask,$acc0

        mulx    $n0,$acc0,$mul1
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1
        mulx    $npj,$mul1,$acc1        !prologue!
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        ld      [$np+8],$npj            !prologue!
        srlx    $car1,32,$car1
        mov     $tmp0,$acc0             !prologue!

.Linner:
        mulx    $apj,$mul0,$tmp0
        mulx    $npj,$mul1,$tmp1
        add     $tpj,$car0,$car0
        ld      [$ap+$j],$apj           ! ap[j]
        add     $acc0,$car0,$car0
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj           ! np[j]
        and     $car0,$mask,$acc0
        ld      [$tp+8],$tpj            ! tp[j]
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        add     $j,4,$j                 ! j++
        mov     $tmp0,$acc0
        st      $car1,[$tp]             ! tp[j-1]
        srlx    $car1,32,$car1
        mov     $tmp1,$acc1
        cmp     $j,$num
        bl      %icc,.Linner
        add     $tp,4,$tp               ! tp++
!.Linner

        mulx    $apj,$mul0,$tmp0        !epilogue!
        mulx    $npj,$mul1,$tmp1
        add     $tpj,$car0,$car0
        add     $acc0,$car0,$car0
        ld      [$tp+8],$tpj            ! tp[j]
        and     $car0,$mask,$acc0
        add     $acc1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]             ! tp[j-1]
        srlx    $car1,32,$car1

        add     $tpj,$car0,$car0
        add     $tmp0,$car0,$car0
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        add     $acc0,$car1,$car1
        st      $car1,[$tp+4]           ! tp[j-1]
        srlx    $car0,32,$car0
        add     $i,4,$i                 ! i++
        srlx    $car1,32,$car1

        add     $car0,$car1,$car1
        cmp     $i,$num
        add     $car2,$car1,$car1
        st      $car1,[$tp+8]

        srlx    $car1,32,$car2
        bl,a    %icc,.Louter
        ld      [$bp+$i],$mul0          ! bp[i]
!.Louter

        add     $tp,12,$tp
\f
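! Editorial note (descriptive comment, not part of the original module): the
! tail code conditionally subtracts the modulus from the result.  .Lsub
! writes tp[] minus np[] into rp[], and the resulting borrow combined with
! the upmost carry bit forms an and/andn/or mask that decides whether .Lcopy
! copies the subtracted value or tp[] itself into rp[], zeroing the
! temporary vector on the stack as it goes.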
.Ltail:
        add     $np,$num,$np
        add     $rp,$num,$rp
        mov     $tp,$ap
        sub     %g0,$num,%o7            ! k=-num

        srl     $npj,30,%o0             ! boundary condition...
        brz,pn  %o0,.Lcopy              ! ... is met
        nop

        ba      .Lsub
        subcc   %g0,%g0,%g0             ! clear %icc.c
.align  16
.Lsub:
        ld      [$tp+%o7],%o0
        ld      [$np+%o7],%o1
        subccc  %o0,%o1,%o1             ! tp[j]-np[j]
        add     $rp,%o7,$i
        add     %o7,4,%o7
        brnz    %o7,.Lsub
        st      %o1,[$i]
        subc    $car2,0,$car2           ! handle upmost overflow bit
        and     $tp,$car2,$ap
        andn    $rp,$car2,$np
        or      $ap,$np,$ap
        sub     %g0,$num,%o7

.Lcopy:
        ld      [$ap+%o7],%o0           ! copy or in-place refresh
        st      %g0,[$tp+%o7]           ! zap tp
        st      %o0,[$rp+%o7]
        add     %o7,4,%o7
        brnz    %o7,.Lcopy
        nop
        mov     1,%i0
        ret
        restore
___
\f
########
######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
######## code without the following dedicated squaring procedure.
########
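# Editorial note: squaring saves work because each cross product
# ap[i]*ap[j] with i!=j appears twice in the full product; the code computes
# it once and doubles it, carrying the bit shifted out of each doubled
# 32-bit chunk in $sbit (see the add/or-with-$sbit pattern in the loops
# below).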
$sbit="%i2";            # re-use $bp!

$code.=<<___;
.align  32
.Lbn_sqr_mont:
        mulx    $mul0,$mul0,$car0               ! ap[0]*ap[0]
        mulx    $apj,$mul0,$tmp0                !prologue!
        and     $car0,$mask,$acc0
        add     %sp,$bias+$frame,$tp
        ld      [$ap+8],$apj                    !prologue!

        mulx    $n0,$acc0,$mul1                 ! "t[0]"*n0
        srlx    $car0,32,$car0
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1               ! np[0]*"t[0]"*n0
        mulx    $npj,$mul1,$acc1                !prologue!
        and     $car0,1,$sbit
        ld      [$np+8],$npj                    !prologue!
        srlx    $car0,1,$car0
        add     $acc0,$car1,$car1
        srlx    $car1,32,$car1
        mov     $tmp0,$acc0                     !prologue!

.Lsqr_1st:
        mulx    $apj,$mul0,$tmp0
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0               ! ap[j]*a0+c0
        add     $acc1,$car1,$car1
        ld      [$ap+$j],$apj                   ! ap[j]
        and     $car0,$mask,$acc0
        ld      [$np+$j],$npj                   ! np[j]
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        mov     $tmp1,$acc1
        srlx    $acc0,32,$sbit
        add     $j,4,$j                         ! j++
        and     $acc0,$mask,$acc0
        cmp     $j,$num
        add     $acc0,$car1,$car1
        st      $car1,[$tp]
        mov     $tmp0,$acc0
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_1st
        add     $tp,4,$tp                       ! tp++
!.Lsqr_1st

        mulx    $apj,$mul0,$tmp0                ! epilogue
        mulx    $npj,$mul1,$tmp1
        add     $acc0,$car0,$car0               ! ap[j]*a0+c0
        add     $acc1,$car1,$car1
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $tmp0,$car0,$car0               ! ap[j]*a0+c0
        add     $tmp1,$car1,$car1
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        st      $car1,[$tp+8]
        srlx    $car1,32,$car2
\f
        ld      [%sp+$bias+$frame],$tmp0        ! tp[0]
        ld      [%sp+$bias+$frame+4],$tmp1      ! tp[1]
        ld      [%sp+$bias+$frame+8],$tpj       ! tp[2]
        ld      [$ap+4],$mul0                   ! ap[1]
        ld      [$ap+8],$apj                    ! ap[2]
        ld      [$np],$car1                     ! np[0]
        ld      [$np+4],$npj                    ! np[1]
        mulx    $n0,$tmp0,$mul1

        mulx    $mul0,$mul0,$car0
        and     $mul1,$mask,$mul1

        mulx    $car1,$mul1,$car1
        mulx    $npj,$mul1,$acc1
        add     $tmp0,$car1,$car1
        and     $car0,$mask,$acc0
        ld      [$np+8],$npj                    ! np[2]
        srlx    $car1,32,$car1
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     $acc0,$car1,$car1
        and     $car0,1,$sbit
        add     $acc1,$car1,$car1
        srlx    $car0,1,$car0
        mov     12,$j
        st      $car1,[%sp+$bias+$frame]        ! tp[0]=
        srlx    $car1,32,$car1
        add     %sp,$bias+$frame+4,$tp

.Lsqr_2nd:
        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $acc0,$car0,$car0
        add     $tpj,$car1,$car1
        ld      [$ap+$j],$apj                   ! ap[j]
        and     $car0,$mask,$acc0
        ld      [$np+$j],$npj                   ! np[j]
        srlx    $car0,32,$car0
        add     $acc1,$car1,$car1
        ld      [$tp+8],$tpj                    ! tp[j]
        add     $acc0,$acc0,$acc0
        add     $j,4,$j                         ! j++
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        cmp     $j,$num
        add     $acc0,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_2nd
        add     $tp,4,$tp                       ! tp++
!.Lsqr_2nd

        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $acc0,$car0,$car0
        add     $tpj,$car1,$car1
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc1,$car1,$car1
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        add     $car2,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car2
\f
        ld      [%sp+$bias+$frame],$tmp1        ! tp[0]
        ld      [%sp+$bias+$frame+4],$tpj       ! tp[1]
        ld      [$ap+8],$mul0                   ! ap[2]
        ld      [$np],$car1                     ! np[0]
        ld      [$np+4],$npj                    ! np[1]
        mulx    $n0,$tmp1,$mul1
        and     $mul1,$mask,$mul1
        mov     8,$i

        mulx    $mul0,$mul0,$car0
        mulx    $car1,$mul1,$car1
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     %sp,$bias+$frame,$tp
        srlx    $car1,32,$car1
        and     $car0,1,$sbit
        srlx    $car0,1,$car0
        mov     4,$j

.Lsqr_outer:
.Lsqr_inner1:
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $j,4,$j
        ld      [$tp+8],$tpj
        cmp     $j,$i
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj
        st      $car1,[$tp]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_inner1
        add     $tp,4,$tp
!.Lsqr_inner1

        add     $j,4,$j
        ld      [$ap+$j],$apj                   ! ap[j]
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        ld      [$np+$j],$npj                   ! np[j]
        add     $acc0,$car1,$car1
        ld      [$tp+8],$tpj                    ! tp[j]
        add     $acc1,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $j,4,$j
        cmp     $j,$num
        be,pn   %icc,.Lsqr_no_inner2
        add     $tp,4,$tp

.Lsqr_inner2:
        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $acc0,$car0,$car0
        ld      [$ap+$j],$apj                   ! ap[j]
        and     $car0,$mask,$acc0
        ld      [$np+$j],$npj                   ! np[j]
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        ld      [$tp+8],$tpj                    ! tp[j]
        or      $sbit,$acc0,$acc0
        add     $j,4,$j                         ! j++
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        cmp     $j,$num
        add     $acc0,$car1,$car1
        add     $acc1,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_inner2
        add     $tp,4,$tp                       ! tp++

.Lsqr_no_inner2:
        mulx    $apj,$mul0,$acc0
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $acc0,$car0,$car0
        and     $car0,$mask,$acc0
        srlx    $car0,32,$car0
        add     $acc0,$acc0,$acc0
        or      $sbit,$acc0,$acc0
        srlx    $acc0,32,$sbit
        and     $acc0,$mask,$acc0
        add     $acc0,$car1,$car1
        add     $acc1,$car1,$car1
        st      $car1,[$tp]                     ! tp[j-1]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        add     $car2,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car2
\f
        add     $i,4,$i                         ! i++
        ld      [%sp+$bias+$frame],$tmp1        ! tp[0]
        ld      [%sp+$bias+$frame+4],$tpj       ! tp[1]
        ld      [$ap+$i],$mul0                  ! ap[j]
        ld      [$np],$car1                     ! np[0]
        ld      [$np+4],$npj                    ! np[1]
        mulx    $n0,$tmp1,$mul1
        and     $mul1,$mask,$mul1
        add     $i,4,$tmp0

        mulx    $mul0,$mul0,$car0
        mulx    $car1,$mul1,$car1
        and     $car0,$mask,$acc0
        add     $tmp1,$car1,$car1
        srlx    $car0,32,$car0
        add     %sp,$bias+$frame,$tp
        srlx    $car1,32,$car1
        and     $car0,1,$sbit
        srlx    $car0,1,$car0

        cmp     $tmp0,$num                      ! i<num-1
        bl      %icc,.Lsqr_outer
        mov     4,$j
\f
.Lsqr_last:
        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $j,4,$j
        ld      [$tp+8],$tpj
        cmp     $j,$i
        add     $acc1,$car1,$car1
        ld      [$np+$j],$npj
        st      $car1,[$tp]
        srlx    $car1,32,$car1
        bl      %icc,.Lsqr_last
        add     $tp,4,$tp
!.Lsqr_last

        mulx    $npj,$mul1,$acc1
        add     $tpj,$car1,$car1
        add     $acc0,$car1,$car1
        add     $acc1,$car1,$car1
        st      $car1,[$tp]
        srlx    $car1,32,$car1

        add     $car0,$car0,$car0               ! recover $car0
        or      $sbit,$car0,$car0
        add     $car0,$car1,$car1
        add     $car2,$car1,$car1
        st      $car1,[$tp+4]
        srlx    $car1,32,$car2

        ba      .Ltail
        add     $tp,8,$tp
.type   $fname,#function
.size   $fname,(.-$fname)
.asciz  "Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
.align  32
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
print $code;
close STDOUT;