1 #!/usr/bin/env perl
2 #
3 # Implemented as a Perl wrapper as we want to support several different
4 # architectures with single file. We pick up the target based on the
5 # file name we are asked to generate.
6 #
7 # It should be noted though that this perl code is nothing like
8 # <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
9 # as a pre-processor to cover for platform differences in name decoration,
10 # linker tables, 32-/64-bit instruction sets...
11 #
12 # As you might know, there are several PowerPC ABIs in use. Most notably,
13 # Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
14 # are similar enough that leaf(!) functions can be implemented to be ABI
15 # neutral. And that's what you find here: ABI-neutral leaf functions.
16 # In case you wonder what that is...
17 #
18 #       AIX performance
19 #
20 #       MEASUREMENTS WITH cc ON A 200 MHz PowerPC 604e.
21 #
22 #       The following is the performance of 32-bit
23 #       compiler-generated code:
24 #
25 #       OpenSSL 0.9.6c 21 dec 2001
26 #       built on: Tue Jun 11 11:06:51 EDT 2002
27 #       options:bn(64,32) ...
28 #compiler: cc -DTHREADS  -DAIX -DB_ENDIAN -DBN_LLONG -O3
29 #                  sign    verify    sign/s verify/s
30 #rsa  512 bits   0.0098s   0.0009s    102.0   1170.6
31 #rsa 1024 bits   0.0507s   0.0026s     19.7    387.5
32 #rsa 2048 bits   0.3036s   0.0085s      3.3    117.1
33 #rsa 4096 bits   2.0040s   0.0299s      0.5     33.4
34 #dsa  512 bits   0.0087s   0.0106s    114.3     94.5
35 #dsa 1024 bits   0.0256s   0.0313s     39.0     32.0    
36 #
37 #       Same benchmark with this assembler code:
38 #
39 #rsa  512 bits   0.0056s   0.0005s    178.6   2049.2
40 #rsa 1024 bits   0.0283s   0.0015s     35.3    674.1
41 #rsa 2048 bits   0.1744s   0.0050s      5.7    201.2
42 #rsa 4096 bits   1.1644s   0.0179s      0.9     55.7
43 #dsa  512 bits   0.0052s   0.0062s    191.6    162.0
44 #dsa 1024 bits   0.0149s   0.0180s     67.0     55.5
45 #
46 #       The number of operations increases by almost 75%
47 #
48 #       Here are performance numbers for 64-bit
49 #       compiler-generated code:
50 #
51 #       OpenSSL 0.9.6g [engine] 9 Aug 2002
52 #       built on: Fri Apr 18 16:59:20 EDT 2003
53 #       options:bn(64,64) ...
54 #       compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
55 #                  sign    verify    sign/s verify/s
56 #rsa  512 bits   0.0028s   0.0003s    357.1   3844.4
57 #rsa 1024 bits   0.0148s   0.0008s     67.5   1239.7
58 #rsa 2048 bits   0.0963s   0.0028s     10.4    353.0
59 #rsa 4096 bits   0.6538s   0.0102s      1.5     98.1
60 #dsa  512 bits   0.0026s   0.0032s    382.5    313.7
61 #dsa 1024 bits   0.0081s   0.0099s    122.8    100.6
62 #
63 #       Same benchmark with this assembler code:
64 #
65 #rsa  512 bits   0.0020s   0.0002s    510.4   6273.7
66 #rsa 1024 bits   0.0088s   0.0005s    114.1   2128.3
67 #rsa 2048 bits   0.0540s   0.0016s     18.5    622.5
68 #rsa 4096 bits   0.3700s   0.0058s      2.7    171.0
69 #dsa  512 bits   0.0016s   0.0020s    610.7    507.1
70 #dsa 1024 bits   0.0047s   0.0058s    212.5    173.2
71 #       
72 #       Again, performance increases by about 75%
73 #
74 #       Mac OS X, Apple G5 1.8GHz (Note: this is 32-bit code)
75 #       OpenSSL 0.9.7c 30 Sep 2003
76 #
77 #       Original code.
78 #
79 #rsa  512 bits   0.0011s   0.0001s    906.1  11012.5
80 #rsa 1024 bits   0.0060s   0.0003s    166.6   3363.1
81 #rsa 2048 bits   0.0370s   0.0010s     27.1    982.4
82 #rsa 4096 bits   0.2426s   0.0036s      4.1    280.4
83 #dsa  512 bits   0.0010s   0.0012s   1038.1    841.5
84 #dsa 1024 bits   0.0030s   0.0037s    329.6    269.7
85 #dsa 2048 bits   0.0101s   0.0127s     98.9     78.6
86 #
87 #       Same benchmark with this assembler code:
88 #
89 #rsa  512 bits   0.0007s   0.0001s   1416.2  16645.9
90 #rsa 1024 bits   0.0036s   0.0002s    274.4   5380.6
91 #rsa 2048 bits   0.0222s   0.0006s     45.1   1589.5
92 #rsa 4096 bits   0.1469s   0.0022s      6.8    449.6
93 #dsa  512 bits   0.0006s   0.0007s   1664.2   1376.2
94 #dsa 1024 bits   0.0018s   0.0023s    545.0    442.2
95 #dsa 2048 bits   0.0061s   0.0075s    163.5    132.8
96 #
97 #        Performance increase of ~60%
98 #
99 #       If you have comments or suggestions to improve the code, send
100 #       me a note at schari@us.ibm.com
101 #
102
103 $opf = shift;
104
105 if ($opf =~ /32\.s/) {
106         $BITS=  32;
107         $BNSZ=  $BITS/8;
108         $ISA=   "\"ppc\"";
109
110         $LD=    "lwz";          # load
111         $LDU=   "lwzu";         # load and update
112         $ST=    "stw";          # store
113         $STU=   "stwu";         # store and update
114         $UMULL= "mullw";        # unsigned multiply low
115         $UMULH= "mulhwu";       # unsigned multiply high
116         $UDIV=  "divwu";        # unsigned divide
117         $UCMPI= "cmplwi";       # unsigned compare with immediate
118         $UCMP=  "cmplw";        # unsigned compare
119         $CNTLZ= "cntlzw";       # count leading zeros
120         $SHL=   "slw";          # shift left
121         $SHR=   "srw";          # unsigned shift right
122         $SHRI=  "srwi";         # unsigned shift right by immediate     
123         $SHLI=  "slwi";         # shift left by immediate
124         $CLRU=  "clrlwi";       # clear upper bits
125         $INSR=  "insrwi";       # insert right
126         $ROTL=  "rotlwi";       # rotate left by immediate
127         $TR=    "tw";           # conditional trap
128 } elsif ($opf =~ /64\.s/) {
129         $BITS=  64;
130         $BNSZ=  $BITS/8;
131         $ISA=   "\"ppc64\"";
132
133         # same as above, but 64-bit mnemonics...
134         $LD=    "ld";           # load
135         $LDU=   "ldu";          # load and update
136         $ST=    "std";          # store
137         $STU=   "stdu";         # store and update
138         $UMULL= "mulld";        # unsigned multiply low
139         $UMULH= "mulhdu";       # unsigned multiply high
140         $UDIV=  "divdu";        # unsigned divide
141         $UCMPI= "cmpldi";       # unsigned compare with immediate
142         $UCMP=  "cmpld";        # unsigned compare
143         $CNTLZ= "cntlzd";       # count leading zeros
144         $SHL=   "sld";          # shift left
145         $SHR=   "srd";          # unsigned shift right
146         $SHRI=  "srdi";         # unsigned shift right by immediate     
147         $SHLI=  "sldi";         # shift left by immediate
148         $CLRU=  "clrldi";       # clear upper bits
149         $INSR=  "insrdi";       # insert right 
150         $ROTL=  "rotldi";       # rotate left by immediate
151         $TR=    "td";           # conditional trap
152 } else { die "nonsense $opf"; }
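
# To illustrate how the two mnemonic sets play out (a worked example, not
# part of the build): the template line
#
#	$LD	r5,`1*$BNSZ`(r4)
#
# has $LD and $BNSZ interpolated by Perl when the here-document below is
# expanded, leaving "lwz r5,`1*4`(r4)" for the 32-bit target and
# "ld r5,`1*8`(r4)" for the 64-bit one; the backquoted arithmetic is then
# evaluated by the ppc-xlate.pl filter, yielding "lwz r5,4(r4)" and
# "ld r5,8(r4)" respectively.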
153
154 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
155 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
156 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
157 die "can't locate ppc-xlate.pl";
158
159 ( defined shift || open STDOUT,"| $^X $xlate $opf" ) ||
160         die "can't call $xlate: $!";
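
# A typical invocation is thus "perl ppc.pl ppc32.s" or "perl ppc.pl
# ppc64.s" (illustrative names; any argument matching 32.s or 64.s
# selects the corresponding target).  The template below is printed to
# STDOUT, which the open() above pipes through ppc-xlate.pl; passing any
# extra argument skips the pipe and emits the raw, untranslated template.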
161
162 $data=<<EOF;
163 #--------------------------------------------------------------------
164 #
165 #
166 #
167 #
168 #       File:           ppc32.s
169 #
170 #       Created by:     Suresh Chari
171 #                       IBM Thomas J. Watson Research Library
172 #                       Hawthorne, NY
173 #
174 #
175 #       Description:    Optimized assembly routines for OpenSSL crypto
176 #                       on the 32-bit PowerPC platform.
177 #
178 #
179 #       Version History
180 #
181 #       2. Fixed bn_add, bn_sub and bn_div_words, added comments,
182 #          cleaned up code. Also made a single version which can
183 #          be used for both the AIX and Linux compilers. See NOTE
184 #          below.
185 #                               12/05/03                Suresh Chari
186 #                       (with lots of help from)        Andy Polyakov
187 ##      
188 #       1. Initial version      10/20/02                Suresh Chari
189 #
190 #
191 #       The following file works for the xlc, cc,
192 #       and gcc compilers.
193 #
194 #       NOTE:   To get the file to link correctly with the gcc compiler
195 #               you have to change the names of the routines and remove
196 #               the first .(dot) character. This should automatically
197 #               be done in the build process.
198 #
199 #       Hand optimized assembly code for the following routines
200 #       
201 #       bn_sqr_comba4
202 #       bn_sqr_comba8
203 #       bn_mul_comba4
204 #       bn_mul_comba8
205 #       bn_sub_words
206 #       bn_add_words
207 #       bn_div_words
208 #       bn_sqr_words
209 #       bn_mul_words
210 #       bn_mul_add_words
211 #
212 #       NOTE:   It is possible to optimize this code more for
213 #       specific PowerPC or Power architectures. On the Northstar
214 #       architecture the optimizations in this file do
215 #        NOT provide much improvement.
216 #
217 #       If you have comments or suggestions to improve the code, send
218 #       me a note at schari\@us.ibm.com
219 #
220 #--------------------------------------------------------------------------
221 #
222 #       Defines to be used in the assembly code.
223 #       
224 #.set r0,0      # we use it as storage for value of 0
225 #.set SP,1      # preserved
226 #.set RTOC,2    # preserved 
227 #.set r3,3      # 1st argument/return value
228 #.set r4,4      # 2nd argument/volatile register
229 #.set r5,5      # 3rd argument/volatile register
230 #.set r6,6      # ...
231 #.set r7,7
232 #.set r8,8
233 #.set r9,9
234 #.set r10,10
235 #.set r11,11
236 #.set r12,12
237 #.set r13,13    # not used, nor any register after it (r14..r31)...
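#	(r0 and r3 through r12 are volatile in both the AIX and the SysV
#	PowerPC ABIs, while r13 and up are reserved or callee-saved; a
#	leaf function that sticks to the volatile set needs no register
#	save/restore and no stack frame, which is what keeps these
#	routines ABI neutral.)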
238
239 #       Declare function names to be global
240 #       NOTE:   For gcc these names MUST be changed to remove
241 #               the first . i.e. for example change ".bn_sqr_comba4"
242 #               to "bn_sqr_comba4". This should be automatically done
243 #               in the build.
244         
245         .globl  .bn_sqr_comba4
246         .globl  .bn_sqr_comba8
247         .globl  .bn_mul_comba4
248         .globl  .bn_mul_comba8
249         .globl  .bn_sub_words
250         .globl  .bn_add_words
251         .globl  .bn_div_words
252         .globl  .bn_sqr_words
253         .globl  .bn_mul_words
254         .globl  .bn_mul_add_words
255         
256 # .text section
257         
258         .machine        $ISA
259
260 #
261 #       NOTE:   The following label name should be changed to
262 #               "bn_sqr_comba4" i.e. remove the first dot
263 #               for the gcc compiler. This should be automatically
264 #               done in the build
265 #
266
267 .align  4
268 .bn_sqr_comba4:
269 #
270 # Optimized version of bn_sqr_comba4.
271 #
272 # void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
273 # r3 contains r
274 # r4 contains a
275 #
276 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:       
277
278 # r5,r6 are the two BN_ULONGs being multiplied.
279 # r7,r8 are the results of the 32x32 giving 64-bit multiply.
280 # r9,r10, r11 are the equivalents of c1,c2, c3.
281 # Here's the assembly
282 #
283 #
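#
#	For reference, the sqr_add_c and sqr_add_c2 steps marked below
#	behave like the following C-style sketch (a model of the macros
#	in the portable BN code, not text that is assembled):
#
#		t = (double-width)a[i]*a[i];	/* sqr_add_c: diagonal */
#		c1 += lo(t); c2 += hi(t) + carry; c3 += carry;
#
#		t = (double-width)a[i]*a[j];	/* sqr_add_c2: i != j, */
#		t = 2*t;			/* term appears twice  */
#		c1 += lo(t); c2 += hi(t) + carry; c3 += carry;
#
#	Each addc/adde/addze triple below is one such carry chain: addc
#	adds the low words and sets CA, adde folds CA into the high
#	words, and addze propagates the final carry into the third word.
#	Doubling (r7,r8) can itself carry out of the top bit, which is
#	what the "catch carry" addze below picks up.
#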
284         xor             r0,r0,r0                # set r0 = 0. Used in the addze
285                                                 # instructions below
286         
287                                                 #sqr_add_c(a,0,c1,c2,c3)
288         $LD             r5,`0*$BNSZ`(r4)                
289         $UMULL          r9,r5,r5                
290         $UMULH          r10,r5,r5               #in first iteration. No need
291                                                 #to add since c1=c2=c3=0.
292                                                 # Note c3(r11) is NOT set to 0
293                                                 # but will be.
294
295         $ST             r9,`0*$BNSZ`(r3)        # r[0]=c1;
296                                                 # sqr_add_c2(a,1,0,c2,c3,c1);
297         $LD             r6,`1*$BNSZ`(r4)                
298         $UMULL          r7,r5,r6
299         $UMULH          r8,r5,r6
300                                         
301         addc            r7,r7,r7                # compute (r7,r8)=2*(r7,r8)
302         adde            r8,r8,r8
303         addze           r9,r0                   # catch carry if any.
304                                                 # r9= r0(=0) and carry 
305         
306         addc            r10,r7,r10              # now add to temp result.
307         addze           r11,r8                  # r11 = r8 + carry (c3 was 0)
308         addze           r9,r9
309         
310         $ST             r10,`1*$BNSZ`(r3)       #r[1]=c2; 
311                                                 #sqr_add_c(a,1,c3,c1,c2)
312         $UMULL          r7,r6,r6
313         $UMULH          r8,r6,r6
314         addc            r11,r7,r11
315         adde            r9,r8,r9
316         addze           r10,r0
317                                                 #sqr_add_c2(a,2,0,c3,c1,c2)
318         $LD             r6,`2*$BNSZ`(r4)
319         $UMULL          r7,r5,r6
320         $UMULH          r8,r5,r6
321         
322         addc            r7,r7,r7
323         adde            r8,r8,r8
324         addze           r10,r10
325         
326         addc            r11,r7,r11
327         adde            r9,r8,r9
328         addze           r10,r10
329         $ST             r11,`2*$BNSZ`(r3)       #r[2]=c3 
330                                                 #sqr_add_c2(a,3,0,c1,c2,c3);
331         $LD             r6,`3*$BNSZ`(r4)                
332         $UMULL          r7,r5,r6
333         $UMULH          r8,r5,r6
334         addc            r7,r7,r7
335         adde            r8,r8,r8
336         addze           r11,r0
337         
338         addc            r9,r7,r9
339         adde            r10,r8,r10
340         addze           r11,r11
341                                                 #sqr_add_c2(a,2,1,c1,c2,c3);
342         $LD             r5,`1*$BNSZ`(r4)
343         $LD             r6,`2*$BNSZ`(r4)
344         $UMULL          r7,r5,r6
345         $UMULH          r8,r5,r6
346         
347         addc            r7,r7,r7
348         adde            r8,r8,r8
349         addze           r11,r11
350         addc            r9,r7,r9
351         adde            r10,r8,r10
352         addze           r11,r11
353         $ST             r9,`3*$BNSZ`(r3)        #r[3]=c1
354                                                 #sqr_add_c(a,2,c2,c3,c1);
355         $UMULL          r7,r6,r6
356         $UMULH          r8,r6,r6
357         addc            r10,r7,r10
358         adde            r11,r8,r11
359         addze           r9,r0
360                                                 #sqr_add_c2(a,3,1,c2,c3,c1);
361         $LD             r6,`3*$BNSZ`(r4)                
362         $UMULL          r7,r5,r6
363         $UMULH          r8,r5,r6
364         addc            r7,r7,r7
365         adde            r8,r8,r8
366         addze           r9,r9
367         
368         addc            r10,r7,r10
369         adde            r11,r8,r11
370         addze           r9,r9
371         $ST             r10,`4*$BNSZ`(r3)       #r[4]=c2
372                                                 #sqr_add_c2(a,3,2,c3,c1,c2);
373         $LD             r5,`2*$BNSZ`(r4)                
374         $UMULL          r7,r5,r6
375         $UMULH          r8,r5,r6
376         addc            r7,r7,r7
377         adde            r8,r8,r8
378         addze           r10,r0
379         
380         addc            r11,r7,r11
381         adde            r9,r8,r9
382         addze           r10,r10
383         $ST             r11,`5*$BNSZ`(r3)       #r[5] = c3
384                                                 #sqr_add_c(a,3,c1,c2,c3);
385         $UMULL          r7,r6,r6                
386         $UMULH          r8,r6,r6
387         addc            r9,r7,r9
388         adde            r10,r8,r10
389
390         $ST             r9,`6*$BNSZ`(r3)        #r[6]=c1
391         $ST             r10,`7*$BNSZ`(r3)       #r[7]=c2
392         blr
393         .long   0x00000000
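#
#	The zero word after blr terminates the routine; on AIX it also
#	serves as the start of a minimal traceback table.  The same
#	marker follows each of the routines below.
#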
394
395 #
396 #       NOTE:   The following label name should be changed to
397 #               "bn_sqr_comba8" i.e. remove the first dot
398 #               for the gcc compiler. This should be automatically
399 #               done in the build
400 #
401         
402 .align  4
403 .bn_sqr_comba8:
404 #
405 # This is an optimized version of the bn_sqr_comba8 routine.
406 # It makes tight use of the adde instruction.
407 #
408 #
409 # void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
410 # r3 contains r
411 # r4 contains a
412 #
413 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:       
414
415 # r5,r6 are the two BN_ULONGs being multiplied.
416 # r7,r8 are the results of the 32x32 giving 64-bit multiply.
417 # r9,r10, r11 are the equivalents of c1,c2, c3.
418 #
419 # A possible optimization of loading all 8 longs of a into registers
420 # doesn't provide any speedup
421
422
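#	Unlike bn_sqr_comba4 above, the off-diagonal products here are
#	not doubled before being accumulated: each (r7,r8) product is
#	simply added into the three-word accumulator twice, i.e. in the
#	C-style sketch used earlier,
#
#		c1 += lo(t); c2 += hi(t) + carry; c3 += carry;	/* twice */
#
#	which is equivalent to adding 2*t but avoids the doubling step
#	and its extra carry, at the cost of a second add chain.
#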
423         xor             r0,r0,r0                #set r0 = 0. Used in addze
424                                                 #instructions below.
425
426                                                 #sqr_add_c(a,0,c1,c2,c3);
427         $LD             r5,`0*$BNSZ`(r4)
428         $UMULL          r9,r5,r5                #1st iteration: no carries.
429         $UMULH          r10,r5,r5
430         $ST             r9,`0*$BNSZ`(r3)        # r[0]=c1;
431                                                 #sqr_add_c2(a,1,0,c2,c3,c1);
432         $LD             r6,`1*$BNSZ`(r4)
433         $UMULL          r7,r5,r6
434         $UMULH          r8,r5,r6        
435         
436         addc            r10,r7,r10              #add the two-register number
437         adde            r11,r8,r0               # (r8,r7) to the three-register
438         addze           r9,r0                   # number (r9,r11,r10). NOTE: r0=0
439         
440         addc            r10,r7,r10              #add the two-register number
441         adde            r11,r8,r11              # (r8,r7) to the three-register
442         addze           r9,r9                   # number (r9,r11,r10).
443         
444         $ST             r10,`1*$BNSZ`(r3)       # r[1]=c2
445                                 
446                                                 #sqr_add_c(a,1,c3,c1,c2);
447         $UMULL          r7,r6,r6
448         $UMULH          r8,r6,r6
449         addc            r11,r7,r11
450         adde            r9,r8,r9
451         addze           r10,r0
452                                                 #sqr_add_c2(a,2,0,c3,c1,c2);
453         $LD             r6,`2*$BNSZ`(r4)
454         $UMULL          r7,r5,r6
455         $UMULH          r8,r5,r6
456         
457         addc            r11,r7,r11
458         adde            r9,r8,r9
459         addze           r10,r10
460         
461         addc            r11,r7,r11
462         adde            r9,r8,r9
463         addze           r10,r10
464         
465         $ST             r11,`2*$BNSZ`(r3)       #r[2]=c3
466                                                 #sqr_add_c2(a,3,0,c1,c2,c3);
467         $LD             r6,`3*$BNSZ`(r4)        #r6 = a[3]. r5 is already a[0].
468         $UMULL          r7,r5,r6
469         $UMULH          r8,r5,r6
470         
471         addc            r9,r7,r9
472         adde            r10,r8,r10
473         addze           r11,r0
474         
475         addc            r9,r7,r9
476         adde            r10,r8,r10
477         addze           r11,r11
478                                                 #sqr_add_c2(a,2,1,c1,c2,c3);
479         $LD             r5,`1*$BNSZ`(r4)
480         $LD             r6,`2*$BNSZ`(r4)
481         $UMULL          r7,r5,r6
482         $UMULH          r8,r5,r6
483         
484         addc            r9,r7,r9
485         adde            r10,r8,r10
486         addze           r11,r11
487         
488         addc            r9,r7,r9
489         adde            r10,r8,r10
490         addze           r11,r11
491         
492         $ST             r9,`3*$BNSZ`(r3)        #r[3]=c1;
493                                                 #sqr_add_c(a,2,c2,c3,c1);
494         $UMULL          r7,r6,r6
495         $UMULH          r8,r6,r6
496         
497         addc            r10,r7,r10
498         adde            r11,r8,r11
499         addze           r9,r0
500                                                 #sqr_add_c2(a,3,1,c2,c3,c1);
501         $LD             r6,`3*$BNSZ`(r4)
502         $UMULL          r7,r5,r6
503         $UMULH          r8,r5,r6
504         
505         addc            r10,r7,r10
506         adde            r11,r8,r11
507         addze           r9,r9
508         
509         addc            r10,r7,r10
510         adde            r11,r8,r11
511         addze           r9,r9
512                                                 #sqr_add_c2(a,4,0,c2,c3,c1);
513         $LD             r5,`0*$BNSZ`(r4)
514         $LD             r6,`4*$BNSZ`(r4)
515         $UMULL          r7,r5,r6
516         $UMULH          r8,r5,r6
517         
518         addc            r10,r7,r10
519         adde            r11,r8,r11
520         addze           r9,r9
521         
522         addc            r10,r7,r10
523         adde            r11,r8,r11
524         addze           r9,r9
525         $ST             r10,`4*$BNSZ`(r3)       #r[4]=c2;
526                                                 #sqr_add_c2(a,5,0,c3,c1,c2);
527         $LD             r6,`5*$BNSZ`(r4)
528         $UMULL          r7,r5,r6
529         $UMULH          r8,r5,r6
530         
531         addc            r11,r7,r11
532         adde            r9,r8,r9
533         addze           r10,r0
534         
535         addc            r11,r7,r11
536         adde            r9,r8,r9
537         addze           r10,r10
538                                                 #sqr_add_c2(a,4,1,c3,c1,c2);
539         $LD             r5,`1*$BNSZ`(r4)
540         $LD             r6,`4*$BNSZ`(r4)
541         $UMULL          r7,r5,r6
542         $UMULH          r8,r5,r6
543         
544         addc            r11,r7,r11
545         adde            r9,r8,r9
546         addze           r10,r10
547         
548         addc            r11,r7,r11
549         adde            r9,r8,r9
550         addze           r10,r10
551                                                 #sqr_add_c2(a,3,2,c3,c1,c2);
552         $LD             r5,`2*$BNSZ`(r4)
553         $LD             r6,`3*$BNSZ`(r4)
554         $UMULL          r7,r5,r6
555         $UMULH          r8,r5,r6
556         
557         addc            r11,r7,r11
558         adde            r9,r8,r9
559         addze           r10,r10
560         
561         addc            r11,r7,r11
562         adde            r9,r8,r9
563         addze           r10,r10
564         $ST             r11,`5*$BNSZ`(r3)       #r[5]=c3;
565                                                 #sqr_add_c(a,3,c1,c2,c3);
566         $UMULL          r7,r6,r6
567         $UMULH          r8,r6,r6
568         addc            r9,r7,r9
569         adde            r10,r8,r10
570         addze           r11,r0
571                                                 #sqr_add_c2(a,4,2,c1,c2,c3);
572         $LD             r6,`4*$BNSZ`(r4)
573         $UMULL          r7,r5,r6
574         $UMULH          r8,r5,r6
575         
576         addc            r9,r7,r9
577         adde            r10,r8,r10
578         addze           r11,r11
579         
580         addc            r9,r7,r9
581         adde            r10,r8,r10
582         addze           r11,r11
583                                                 #sqr_add_c2(a,5,1,c1,c2,c3);
584         $LD             r5,`1*$BNSZ`(r4)
585         $LD             r6,`5*$BNSZ`(r4)
586         $UMULL          r7,r5,r6
587         $UMULH          r8,r5,r6
588         
589         addc            r9,r7,r9
590         adde            r10,r8,r10
591         addze           r11,r11
592         
593         addc            r9,r7,r9
594         adde            r10,r8,r10
595         addze           r11,r11
596                                                 #sqr_add_c2(a,6,0,c1,c2,c3);
597         $LD             r5,`0*$BNSZ`(r4)
598         $LD             r6,`6*$BNSZ`(r4)
599         $UMULL          r7,r5,r6
600         $UMULH          r8,r5,r6
601         addc            r9,r7,r9
602         adde            r10,r8,r10
603         addze           r11,r11
604         addc            r9,r7,r9
605         adde            r10,r8,r10
606         addze           r11,r11
607         $ST             r9,`6*$BNSZ`(r3)        #r[6]=c1;
608                                                 #sqr_add_c2(a,7,0,c2,c3,c1);
609         $LD             r6,`7*$BNSZ`(r4)
610         $UMULL          r7,r5,r6
611         $UMULH          r8,r5,r6
612         
613         addc            r10,r7,r10
614         adde            r11,r8,r11
615         addze           r9,r0
616         addc            r10,r7,r10
617         adde            r11,r8,r11
618         addze           r9,r9
619                                                 #sqr_add_c2(a,6,1,c2,c3,c1);
620         $LD             r5,`1*$BNSZ`(r4)
621         $LD             r6,`6*$BNSZ`(r4)
622         $UMULL          r7,r5,r6
623         $UMULH          r8,r5,r6
624         
625         addc            r10,r7,r10
626         adde            r11,r8,r11
627         addze           r9,r9
628         addc            r10,r7,r10
629         adde            r11,r8,r11
630         addze           r9,r9
631                                                 #sqr_add_c2(a,5,2,c2,c3,c1);
632         $LD             r5,`2*$BNSZ`(r4)
633         $LD             r6,`5*$BNSZ`(r4)
634         $UMULL          r7,r5,r6
635         $UMULH          r8,r5,r6
636         addc            r10,r7,r10
637         adde            r11,r8,r11
638         addze           r9,r9
639         addc            r10,r7,r10
640         adde            r11,r8,r11
641         addze           r9,r9
642                                                 #sqr_add_c2(a,4,3,c2,c3,c1);
643         $LD             r5,`3*$BNSZ`(r4)
644         $LD             r6,`4*$BNSZ`(r4)
645         $UMULL          r7,r5,r6
646         $UMULH          r8,r5,r6
647         
648         addc            r10,r7,r10
649         adde            r11,r8,r11
650         addze           r9,r9
651         addc            r10,r7,r10
652         adde            r11,r8,r11
653         addze           r9,r9
654         $ST             r10,`7*$BNSZ`(r3)       #r[7]=c2;
655                                                 #sqr_add_c(a,4,c3,c1,c2);
656         $UMULL          r7,r6,r6
657         $UMULH          r8,r6,r6
658         addc            r11,r7,r11
659         adde            r9,r8,r9
660         addze           r10,r0
661                                                 #sqr_add_c2(a,5,3,c3,c1,c2);
662         $LD             r6,`5*$BNSZ`(r4)
663         $UMULL          r7,r5,r6
664         $UMULH          r8,r5,r6
665         addc            r11,r7,r11
666         adde            r9,r8,r9
667         addze           r10,r10
668         addc            r11,r7,r11
669         adde            r9,r8,r9
670         addze           r10,r10
671                                                 #sqr_add_c2(a,6,2,c3,c1,c2);
672         $LD             r5,`2*$BNSZ`(r4)
673         $LD             r6,`6*$BNSZ`(r4)
674         $UMULL          r7,r5,r6
675         $UMULH          r8,r5,r6
676         addc            r11,r7,r11
677         adde            r9,r8,r9
678         addze           r10,r10
679         
680         addc            r11,r7,r11
681         adde            r9,r8,r9
682         addze           r10,r10
683                                                 #sqr_add_c2(a,7,1,c3,c1,c2);
684         $LD             r5,`1*$BNSZ`(r4)
685         $LD             r6,`7*$BNSZ`(r4)
686         $UMULL          r7,r5,r6
687         $UMULH          r8,r5,r6
688         addc            r11,r7,r11
689         adde            r9,r8,r9
690         addze           r10,r10
691         addc            r11,r7,r11
692         adde            r9,r8,r9
693         addze           r10,r10
694         $ST             r11,`8*$BNSZ`(r3)       #r[8]=c3;
695                                                 #sqr_add_c2(a,7,2,c1,c2,c3);
696         $LD             r5,`2*$BNSZ`(r4)
697         $UMULL          r7,r5,r6
698         $UMULH          r8,r5,r6
699         
700         addc            r9,r7,r9
701         adde            r10,r8,r10
702         addze           r11,r0
703         addc            r9,r7,r9
704         adde            r10,r8,r10
705         addze           r11,r11
706                                                 #sqr_add_c2(a,6,3,c1,c2,c3);
707         $LD             r5,`3*$BNSZ`(r4)
708         $LD             r6,`6*$BNSZ`(r4)
709         $UMULL          r7,r5,r6
710         $UMULH          r8,r5,r6
711         addc            r9,r7,r9
712         adde            r10,r8,r10
713         addze           r11,r11
714         addc            r9,r7,r9
715         adde            r10,r8,r10
716         addze           r11,r11
717                                                 #sqr_add_c2(a,5,4,c1,c2,c3);
718         $LD             r5,`4*$BNSZ`(r4)
719         $LD             r6,`5*$BNSZ`(r4)
720         $UMULL          r7,r5,r6
721         $UMULH          r8,r5,r6
722         addc            r9,r7,r9
723         adde            r10,r8,r10
724         addze           r11,r11
725         addc            r9,r7,r9
726         adde            r10,r8,r10
727         addze           r11,r11
728         $ST             r9,`9*$BNSZ`(r3)        #r[9]=c1;
729                                                 #sqr_add_c(a,5,c2,c3,c1);
730         $UMULL          r7,r6,r6
731         $UMULH          r8,r6,r6
732         addc            r10,r7,r10
733         adde            r11,r8,r11
734         addze           r9,r0
735                                                 #sqr_add_c2(a,6,4,c2,c3,c1);
736         $LD             r6,`6*$BNSZ`(r4)
737         $UMULL          r7,r5,r6
738         $UMULH          r8,r5,r6
739         addc            r10,r7,r10
740         adde            r11,r8,r11
741         addze           r9,r9
742         addc            r10,r7,r10
743         adde            r11,r8,r11
744         addze           r9,r9
745                                                 #sqr_add_c2(a,7,3,c2,c3,c1);
746         $LD             r5,`3*$BNSZ`(r4)
747         $LD             r6,`7*$BNSZ`(r4)
748         $UMULL          r7,r5,r6
749         $UMULH          r8,r5,r6
750         addc            r10,r7,r10
751         adde            r11,r8,r11
752         addze           r9,r9
753         addc            r10,r7,r10
754         adde            r11,r8,r11
755         addze           r9,r9
756         $ST             r10,`10*$BNSZ`(r3)      #r[10]=c2;
757                                                 #sqr_add_c2(a,7,4,c3,c1,c2);
758         $LD             r5,`4*$BNSZ`(r4)
759         $UMULL          r7,r5,r6
760         $UMULH          r8,r5,r6
761         addc            r11,r7,r11
762         adde            r9,r8,r9
763         addze           r10,r0
764         addc            r11,r7,r11
765         adde            r9,r8,r9
766         addze           r10,r10
767                                                 #sqr_add_c2(a,6,5,c3,c1,c2);
768         $LD             r5,`5*$BNSZ`(r4)
769         $LD             r6,`6*$BNSZ`(r4)
770         $UMULL          r7,r5,r6
771         $UMULH          r8,r5,r6
772         addc            r11,r7,r11
773         adde            r9,r8,r9
774         addze           r10,r10
775         addc            r11,r7,r11
776         adde            r9,r8,r9
777         addze           r10,r10
778         $ST             r11,`11*$BNSZ`(r3)      #r[11]=c3;
779                                                 #sqr_add_c(a,6,c1,c2,c3);
780         $UMULL          r7,r6,r6
781         $UMULH          r8,r6,r6
782         addc            r9,r7,r9
783         adde            r10,r8,r10
784         addze           r11,r0
785                                                 #sqr_add_c2(a,7,5,c1,c2,c3)
786         $LD             r6,`7*$BNSZ`(r4)
787         $UMULL          r7,r5,r6
788         $UMULH          r8,r5,r6
789         addc            r9,r7,r9
790         adde            r10,r8,r10
791         addze           r11,r11
792         addc            r9,r7,r9
793         adde            r10,r8,r10
794         addze           r11,r11
795         $ST             r9,`12*$BNSZ`(r3)       #r[12]=c1;
796         
797                                                 #sqr_add_c2(a,7,6,c2,c3,c1)
798         $LD             r5,`6*$BNSZ`(r4)
799         $UMULL          r7,r5,r6
800         $UMULH          r8,r5,r6
801         addc            r10,r7,r10
802         adde            r11,r8,r11
803         addze           r9,r0
804         addc            r10,r7,r10
805         adde            r11,r8,r11
806         addze           r9,r9
807         $ST             r10,`13*$BNSZ`(r3)      #r[13]=c2;
808                                                 #sqr_add_c(a,7,c3,c1,c2);
809         $UMULL          r7,r6,r6
810         $UMULH          r8,r6,r6
811         addc            r11,r7,r11
812         adde            r9,r8,r9
813         $ST             r11,`14*$BNSZ`(r3)      #r[14]=c3;
814         $ST             r9, `15*$BNSZ`(r3)      #r[15]=c1;
815
816
817         blr
818
819         .long   0x00000000
820
821 #
822 #       NOTE:   The following label name should be changed to
823 #               "bn_mul_comba4" i.e. remove the first dot
824 #               for the gcc compiler. This should be automatically
825 #               done in the build
826 #
827
828 .align  4
829 .bn_mul_comba4:
830 #
831 # This is an optimized version of the bn_mul_comba4 routine.
832 #
833 # void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
834 # r3 contains r
835 # r4 contains a
836 # r5 contains b
837 # r6, r7 are the 2 BN_ULONGs being multiplied.
838 # r8, r9 are the results of the 32x32 giving 64-bit multiply.
839 # r10, r11, r12 are the equivalents of c1, c2, and c3.
840 #
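#	The unrolled sequence below walks the product column by column:
#	every partial product a[i]*b[j] with i+j == k is accumulated
#	into (c1,c2,c3) before r[k] is stored, as in this C-style sketch:
#
#		for (k = 0; k < 7; k++) {
#			for (i = 0; i <= 3; i++) {
#				j = k - i;
#				if (j < 0 || j > 3) continue;
#				mul_add_c(a[i],b[j],c1,c2,c3);
#			}
#			r[k] = c1;	/* then the c's rotate */
#		}
#		r[7] = c2;		/* final high word */
#
#	Within a column the (i,j) pairs are visited in whichever order
#	lets one operand stay loaded across the column boundary, which
#	is why the loads below alternate direction.
#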
841         xor     r0,r0,r0                #r0=0. Used in addze below.
842                                         #mul_add_c(a[0],b[0],c1,c2,c3);
843         $LD     r6,`0*$BNSZ`(r4)                
844         $LD     r7,`0*$BNSZ`(r5)                
845         $UMULL  r10,r6,r7               
846         $UMULH  r11,r6,r7               
847         $ST     r10,`0*$BNSZ`(r3)       #r[0]=c1
848                                         #mul_add_c(a[0],b[1],c2,c3,c1);
849         $LD     r7,`1*$BNSZ`(r5)                
850         $UMULL  r8,r6,r7
851         $UMULH  r9,r6,r7
852         addc    r11,r8,r11
853         adde    r12,r9,r0
854         addze   r10,r0
855                                         #mul_add_c(a[1],b[0],c2,c3,c1);
856         $LD     r6, `1*$BNSZ`(r4)               
857         $LD     r7, `0*$BNSZ`(r5)               
858         $UMULL  r8,r6,r7
859         $UMULH  r9,r6,r7
860         addc    r11,r8,r11
861         adde    r12,r9,r12
862         addze   r10,r10
863         $ST     r11,`1*$BNSZ`(r3)       #r[1]=c2
864                                         #mul_add_c(a[2],b[0],c3,c1,c2);
865         $LD     r6,`2*$BNSZ`(r4)                
866         $UMULL  r8,r6,r7
867         $UMULH  r9,r6,r7
868         addc    r12,r8,r12
869         adde    r10,r9,r10
870         addze   r11,r0
871                                         #mul_add_c(a[1],b[1],c3,c1,c2);
872         $LD     r6,`1*$BNSZ`(r4)                
873         $LD     r7,`1*$BNSZ`(r5)                
874         $UMULL  r8,r6,r7
875         $UMULH  r9,r6,r7
876         addc    r12,r8,r12
877         adde    r10,r9,r10
878         addze   r11,r11
879                                         #mul_add_c(a[0],b[2],c3,c1,c2);
880         $LD     r6,`0*$BNSZ`(r4)                
881         $LD     r7,`2*$BNSZ`(r5)                
882         $UMULL  r8,r6,r7
883         $UMULH  r9,r6,r7
884         addc    r12,r8,r12
885         adde    r10,r9,r10
886         addze   r11,r11
887         $ST     r12,`2*$BNSZ`(r3)       #r[2]=c3
888                                         #mul_add_c(a[0],b[3],c1,c2,c3);
889         $LD     r7,`3*$BNSZ`(r5)                
890         $UMULL  r8,r6,r7
891         $UMULH  r9,r6,r7
892         addc    r10,r8,r10
893         adde    r11,r9,r11
894         addze   r12,r0
895                                         #mul_add_c(a[1],b[2],c1,c2,c3);
896         $LD     r6,`1*$BNSZ`(r4)
897         $LD     r7,`2*$BNSZ`(r5)
898         $UMULL  r8,r6,r7
899         $UMULH  r9,r6,r7
900         addc    r10,r8,r10
901         adde    r11,r9,r11
902         addze   r12,r12
903                                         #mul_add_c(a[2],b[1],c1,c2,c3);
904         $LD     r6,`2*$BNSZ`(r4)
905         $LD     r7,`1*$BNSZ`(r5)
906         $UMULL  r8,r6,r7
907         $UMULH  r9,r6,r7
908         addc    r10,r8,r10
909         adde    r11,r9,r11
910         addze   r12,r12
911                                         #mul_add_c(a[3],b[0],c1,c2,c3);
912         $LD     r6,`3*$BNSZ`(r4)
913         $LD     r7,`0*$BNSZ`(r5)
914         $UMULL  r8,r6,r7
915         $UMULH  r9,r6,r7
916         addc    r10,r8,r10
917         adde    r11,r9,r11
918         addze   r12,r12
919         $ST     r10,`3*$BNSZ`(r3)       #r[3]=c1
920                                         #mul_add_c(a[3],b[1],c2,c3,c1);
921         $LD     r7,`1*$BNSZ`(r5)                
922         $UMULL  r8,r6,r7
923         $UMULH  r9,r6,r7
924         addc    r11,r8,r11
925         adde    r12,r9,r12
926         addze   r10,r0
927                                         #mul_add_c(a[2],b[2],c2,c3,c1);
928         $LD     r6,`2*$BNSZ`(r4)
929         $LD     r7,`2*$BNSZ`(r5)
930         $UMULL  r8,r6,r7
931         $UMULH  r9,r6,r7
932         addc    r11,r8,r11
933         adde    r12,r9,r12
934         addze   r10,r10
935                                         #mul_add_c(a[1],b[3],c2,c3,c1);
936         $LD     r6,`1*$BNSZ`(r4)
937         $LD     r7,`3*$BNSZ`(r5)
938         $UMULL  r8,r6,r7
939         $UMULH  r9,r6,r7
940         addc    r11,r8,r11
941         adde    r12,r9,r12
942         addze   r10,r10
943         $ST     r11,`4*$BNSZ`(r3)       #r[4]=c2
944                                         #mul_add_c(a[2],b[3],c3,c1,c2);
945         $LD     r6,`2*$BNSZ`(r4)                
946         $UMULL  r8,r6,r7
947         $UMULH  r9,r6,r7
948         addc    r12,r8,r12
949         adde    r10,r9,r10
950         addze   r11,r0
951                                         #mul_add_c(a[3],b[2],c3,c1,c2);
952         $LD     r6,`3*$BNSZ`(r4)
953         $LD     r7,`2*$BNSZ`(r5)
954         $UMULL  r8,r6,r7
955         $UMULH  r9,r6,r7
956         addc    r12,r8,r12
957         adde    r10,r9,r10
958         addze   r11,r11
959         $ST     r12,`5*$BNSZ`(r3)       #r[5]=c3
960                                         #mul_add_c(a[3],b[3],c1,c2,c3);
961         $LD     r7,`3*$BNSZ`(r5)                
962         $UMULL  r8,r6,r7
963         $UMULH  r9,r6,r7
964         addc    r10,r8,r10
965         adde    r11,r9,r11
966
967         $ST     r10,`6*$BNSZ`(r3)       #r[6]=c1
968         $ST     r11,`7*$BNSZ`(r3)       #r[7]=c2
969         blr
970         .long   0x00000000
971
972 #
973 #       NOTE:   The following label name should be changed to
974 #               "bn_mul_comba8" i.e. remove the first dot
975 #               for the gcc compiler. This should be automatically
976 #               done in the build
977 #
978         
979 .align  4
980 .bn_mul_comba8:
981 #
982 # Optimized version of the bn_mul_comba8 routine.
983 #
984 # void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
985 # r3 contains r
986 # r4 contains a
987 # r5 contains b
988 # r6, r7 are the 2 BN_ULONGs being multiplied.
989 # r8, r9 are the results of the 32x32 giving 64-bit multiply.
990 # r10, r11, r12 are the equivalents of c1, c2, and c3.
991 #
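#	This is the same column-by-column scheme as bn_mul_comba4 above,
#	extended to the fifteen columns k = 0..14 (now with indices
#	0 <= i,j <= 7), r[15] taking the final high word:
#
#		for (k = 0; k < 15; k++)
#			for (i = (k > 7 ? k - 7 : 0); i <= (k < 7 ? k : 7); i++)
#				mul_add_c(a[i],b[k-i],c1,c2,c3);
#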
992         xor     r0,r0,r0                #r0=0. Used in addze below.
993         
994                                         #mul_add_c(a[0],b[0],c1,c2,c3);
995         $LD     r6,`0*$BNSZ`(r4)        #a[0]
996         $LD     r7,`0*$BNSZ`(r5)        #b[0]
997         $UMULL  r10,r6,r7
998         $UMULH  r11,r6,r7
999         $ST     r10,`0*$BNSZ`(r3)       #r[0]=c1;
1000                                         #mul_add_c(a[0],b[1],c2,c3,c1);
1001         $LD     r7,`1*$BNSZ`(r5)
1002         $UMULL  r8,r6,r7
1003         $UMULH  r9,r6,r7
1004         addc    r11,r11,r8
1005         addze   r12,r9                  # since we didn't set r12 to zero before.
1006         addze   r10,r0
1007                                         #mul_add_c(a[1],b[0],c2,c3,c1);
1008         $LD     r6,`1*$BNSZ`(r4)
1009         $LD     r7,`0*$BNSZ`(r5)
1010         $UMULL  r8,r6,r7
1011         $UMULH  r9,r6,r7
1012         addc    r11,r11,r8
1013         adde    r12,r12,r9
1014         addze   r10,r10
1015         $ST     r11,`1*$BNSZ`(r3)       #r[1]=c2;
1016                                         #mul_add_c(a[2],b[0],c3,c1,c2);
1017         $LD     r6,`2*$BNSZ`(r4)
1018         $UMULL  r8,r6,r7
1019         $UMULH  r9,r6,r7
1020         addc    r12,r12,r8
1021         adde    r10,r10,r9
1022         addze   r11,r0
1023                                         #mul_add_c(a[1],b[1],c3,c1,c2);
1024         $LD     r6,`1*$BNSZ`(r4)
1025         $LD     r7,`1*$BNSZ`(r5)
1026         $UMULL  r8,r6,r7
1027         $UMULH  r9,r6,r7
1028         addc    r12,r12,r8
1029         adde    r10,r10,r9
1030         addze   r11,r11
1031                                         #mul_add_c(a[0],b[2],c3,c1,c2);
1032         $LD     r6,`0*$BNSZ`(r4)
1033         $LD     r7,`2*$BNSZ`(r5)
1034         $UMULL  r8,r6,r7
1035         $UMULH  r9,r6,r7
1036         addc    r12,r12,r8
1037         adde    r10,r10,r9
1038         addze   r11,r11
1039         $ST     r12,`2*$BNSZ`(r3)       #r[2]=c3;
1040                                         #mul_add_c(a[0],b[3],c1,c2,c3);
1041         $LD     r7,`3*$BNSZ`(r5)
1042         $UMULL  r8,r6,r7
1043         $UMULH  r9,r6,r7
1044         addc    r10,r10,r8
1045         adde    r11,r11,r9
1046         addze   r12,r0
1047                                         #mul_add_c(a[1],b[2],c1,c2,c3);
1048         $LD     r6,`1*$BNSZ`(r4)
1049         $LD     r7,`2*$BNSZ`(r5)
1050         $UMULL  r8,r6,r7
1051         $UMULH  r9,r6,r7
1052         addc    r10,r10,r8
1053         adde    r11,r11,r9
1054         addze   r12,r12
1055                 
1056                                         #mul_add_c(a[2],b[1],c1,c2,c3);
1057         $LD     r6,`2*$BNSZ`(r4)
1058         $LD     r7,`1*$BNSZ`(r5)
1059         $UMULL  r8,r6,r7
1060         $UMULH  r9,r6,r7
1061         addc    r10,r10,r8
1062         adde    r11,r11,r9
1063         addze   r12,r12
1064                                         #mul_add_c(a[3],b[0],c1,c2,c3);
1065         $LD     r6,`3*$BNSZ`(r4)
1066         $LD     r7,`0*$BNSZ`(r5)
1067         $UMULL  r8,r6,r7
1068         $UMULH  r9,r6,r7
1069         addc    r10,r10,r8
1070         adde    r11,r11,r9
1071         addze   r12,r12
1072         $ST     r10,`3*$BNSZ`(r3)       #r[3]=c1;
1073                                         #mul_add_c(a[4],b[0],c2,c3,c1);
1074         $LD     r6,`4*$BNSZ`(r4)
1075         $UMULL  r8,r6,r7
1076         $UMULH  r9,r6,r7
1077         addc    r11,r11,r8
1078         adde    r12,r12,r9
1079         addze   r10,r0
1080                                         #mul_add_c(a[3],b[1],c2,c3,c1);
1081         $LD     r6,`3*$BNSZ`(r4)
1082         $LD     r7,`1*$BNSZ`(r5)
1083         $UMULL  r8,r6,r7
1084         $UMULH  r9,r6,r7
1085         addc    r11,r11,r8
1086         adde    r12,r12,r9
1087         addze   r10,r10
1088                                         #mul_add_c(a[2],b[2],c2,c3,c1);
1089         $LD     r6,`2*$BNSZ`(r4)
1090         $LD     r7,`2*$BNSZ`(r5)
1091         $UMULL  r8,r6,r7
1092         $UMULH  r9,r6,r7
1093         addc    r11,r11,r8
1094         adde    r12,r12,r9
1095         addze   r10,r10
1096                                         #mul_add_c(a[1],b[3],c2,c3,c1);
1097         $LD     r6,`1*$BNSZ`(r4)
1098         $LD     r7,`3*$BNSZ`(r5)
1099         $UMULL  r8,r6,r7
1100         $UMULH  r9,r6,r7
1101         addc    r11,r11,r8
1102         adde    r12,r12,r9
1103         addze   r10,r10
1104                                         #mul_add_c(a[0],b[4],c2,c3,c1);
1105         $LD     r6,`0*$BNSZ`(r4)
1106         $LD     r7,`4*$BNSZ`(r5)
1107         $UMULL  r8,r6,r7
1108         $UMULH  r9,r6,r7
1109         addc    r11,r11,r8
1110         adde    r12,r12,r9
1111         addze   r10,r10
1112         $ST     r11,`4*$BNSZ`(r3)       #r[4]=c2;
1113                                         #mul_add_c(a[0],b[5],c3,c1,c2);
1114         $LD     r7,`5*$BNSZ`(r5)
1115         $UMULL  r8,r6,r7
1116         $UMULH  r9,r6,r7
1117         addc    r12,r12,r8
1118         adde    r10,r10,r9
1119         addze   r11,r0
1120                                         #mul_add_c(a[1],b[4],c3,c1,c2);
1121         $LD     r6,`1*$BNSZ`(r4)                
1122         $LD     r7,`4*$BNSZ`(r5)
1123         $UMULL  r8,r6,r7
1124         $UMULH  r9,r6,r7
1125         addc    r12,r12,r8
1126         adde    r10,r10,r9
1127         addze   r11,r11
1128                                         #mul_add_c(a[2],b[3],c3,c1,c2);
1129         $LD     r6,`2*$BNSZ`(r4)                
1130         $LD     r7,`3*$BNSZ`(r5)
1131         $UMULL  r8,r6,r7
1132         $UMULH  r9,r6,r7
1133         addc    r12,r12,r8
1134         adde    r10,r10,r9
1135         addze   r11,r11
1136                                         #mul_add_c(a[3],b[2],c3,c1,c2);
1137         $LD     r6,`3*$BNSZ`(r4)                
1138         $LD     r7,`2*$BNSZ`(r5)
1139         $UMULL  r8,r6,r7
1140         $UMULH  r9,r6,r7
1141         addc    r12,r12,r8
1142         adde    r10,r10,r9
1143         addze   r11,r11
1144                                         #mul_add_c(a[4],b[1],c3,c1,c2);
1145         $LD     r6,`4*$BNSZ`(r4)                
1146         $LD     r7,`1*$BNSZ`(r5)
1147         $UMULL  r8,r6,r7
1148         $UMULH  r9,r6,r7
1149         addc    r12,r12,r8
1150         adde    r10,r10,r9
1151         addze   r11,r11
1152                                         #mul_add_c(a[5],b[0],c3,c1,c2);
1153         $LD     r6,`5*$BNSZ`(r4)                
1154         $LD     r7,`0*$BNSZ`(r5)
1155         $UMULL  r8,r6,r7
1156         $UMULH  r9,r6,r7
1157         addc    r12,r12,r8
1158         adde    r10,r10,r9
1159         addze   r11,r11
1160         $ST     r12,`5*$BNSZ`(r3)       #r[5]=c3;
1161                                         #mul_add_c(a[6],b[0],c1,c2,c3);
1162         $LD     r6,`6*$BNSZ`(r4)
1163         $UMULL  r8,r6,r7
1164         $UMULH  r9,r6,r7
1165         addc    r10,r10,r8
1166         adde    r11,r11,r9
1167         addze   r12,r0
1168                                         #mul_add_c(a[5],b[1],c1,c2,c3);
1169         $LD     r6,`5*$BNSZ`(r4)
1170         $LD     r7,`1*$BNSZ`(r5)
1171         $UMULL  r8,r6,r7
1172         $UMULH  r9,r6,r7
1173         addc    r10,r10,r8
1174         adde    r11,r11,r9
1175         addze   r12,r12
1176                                         #mul_add_c(a[4],b[2],c1,c2,c3);
1177         $LD     r6,`4*$BNSZ`(r4)
1178         $LD     r7,`2*$BNSZ`(r5)
1179         $UMULL  r8,r6,r7
1180         $UMULH  r9,r6,r7
1181         addc    r10,r10,r8
1182         adde    r11,r11,r9
1183         addze   r12,r12
1184                                         #mul_add_c(a[3],b[3],c1,c2,c3);
1185         $LD     r6,`3*$BNSZ`(r4)
1186         $LD     r7,`3*$BNSZ`(r5)
1187         $UMULL  r8,r6,r7
1188         $UMULH  r9,r6,r7
1189         addc    r10,r10,r8
1190         adde    r11,r11,r9
1191         addze   r12,r12
1192                                         #mul_add_c(a[2],b[4],c1,c2,c3);
1193         $LD     r6,`2*$BNSZ`(r4)
1194         $LD     r7,`4*$BNSZ`(r5)
1195         $UMULL  r8,r6,r7
1196         $UMULH  r9,r6,r7
1197         addc    r10,r10,r8
1198         adde    r11,r11,r9
1199         addze   r12,r12
1200                                         #mul_add_c(a[1],b[5],c1,c2,c3);
1201         $LD     r6,`1*$BNSZ`(r4)
1202         $LD     r7,`5*$BNSZ`(r5)
1203         $UMULL  r8,r6,r7
1204         $UMULH  r9,r6,r7
1205         addc    r10,r10,r8
1206         adde    r11,r11,r9
1207         addze   r12,r12
1208                                         #mul_add_c(a[0],b[6],c1,c2,c3);
1209         $LD     r6,`0*$BNSZ`(r4)
1210         $LD     r7,`6*$BNSZ`(r5)
1211         $UMULL  r8,r6,r7
1212         $UMULH  r9,r6,r7
1213         addc    r10,r10,r8
1214         adde    r11,r11,r9
1215         addze   r12,r12
1216         $ST     r10,`6*$BNSZ`(r3)       #r[6]=c1;
1217                                         #mul_add_c(a[0],b[7],c2,c3,c1);
1218         $LD     r7,`7*$BNSZ`(r5)
1219         $UMULL  r8,r6,r7
1220         $UMULH  r9,r6,r7
1221         addc    r11,r11,r8
1222         adde    r12,r12,r9
1223         addze   r10,r0
1224                                         #mul_add_c(a[1],b[6],c2,c3,c1);
1225         $LD     r6,`1*$BNSZ`(r4)
1226         $LD     r7,`6*$BNSZ`(r5)
1227         $UMULL  r8,r6,r7
1228         $UMULH  r9,r6,r7
1229         addc    r11,r11,r8
1230         adde    r12,r12,r9
1231         addze   r10,r10
1232                                         #mul_add_c(a[2],b[5],c2,c3,c1);
1233         $LD     r6,`2*$BNSZ`(r4)
1234         $LD     r7,`5*$BNSZ`(r5)
1235         $UMULL  r8,r6,r7
1236         $UMULH  r9,r6,r7
1237         addc    r11,r11,r8
1238         adde    r12,r12,r9
1239         addze   r10,r10
1240                                         #mul_add_c(a[3],b[4],c2,c3,c1);
1241         $LD     r6,`3*$BNSZ`(r4)
1242         $LD     r7,`4*$BNSZ`(r5)
1243         $UMULL  r8,r6,r7
1244         $UMULH  r9,r6,r7
1245         addc    r11,r11,r8
1246         adde    r12,r12,r9
1247         addze   r10,r10
1248                                         #mul_add_c(a[4],b[3],c2,c3,c1);
1249         $LD     r6,`4*$BNSZ`(r4)
1250         $LD     r7,`3*$BNSZ`(r5)
1251         $UMULL  r8,r6,r7
1252         $UMULH  r9,r6,r7
1253         addc    r11,r11,r8
1254         adde    r12,r12,r9
1255         addze   r10,r10
1256                                         #mul_add_c(a[5],b[2],c2,c3,c1);
1257         $LD     r6,`5*$BNSZ`(r4)
1258         $LD     r7,`2*$BNSZ`(r5)
1259         $UMULL  r8,r6,r7
1260         $UMULH  r9,r6,r7
1261         addc    r11,r11,r8
1262         adde    r12,r12,r9
1263         addze   r10,r10
1264                                         #mul_add_c(a[6],b[1],c2,c3,c1);
1265         $LD     r6,`6*$BNSZ`(r4)
1266         $LD     r7,`1*$BNSZ`(r5)
1267         $UMULL  r8,r6,r7
1268         $UMULH  r9,r6,r7
1269         addc    r11,r11,r8
1270         adde    r12,r12,r9
1271         addze   r10,r10
1272                                         #mul_add_c(a[7],b[0],c2,c3,c1);
1273         $LD     r6,`7*$BNSZ`(r4)
1274         $LD     r7,`0*$BNSZ`(r5)
1275         $UMULL  r8,r6,r7
1276         $UMULH  r9,r6,r7
1277         addc    r11,r11,r8
1278         adde    r12,r12,r9
1279         addze   r10,r10
1280         $ST     r11,`7*$BNSZ`(r3)       #r[7]=c2;
1281                                         #mul_add_c(a[7],b[1],c3,c1,c2);
1282         $LD     r7,`1*$BNSZ`(r5)
1283         $UMULL  r8,r6,r7
1284         $UMULH  r9,r6,r7
1285         addc    r12,r12,r8
1286         adde    r10,r10,r9
1287         addze   r11,r0
1288                                         #mul_add_c(a[6],b[2],c3,c1,c2);
1289         $LD     r6,`6*$BNSZ`(r4)
1290         $LD     r7,`2*$BNSZ`(r5)
1291         $UMULL  r8,r6,r7
1292         $UMULH  r9,r6,r7
1293         addc    r12,r12,r8
1294         adde    r10,r10,r9
1295         addze   r11,r11
1296                                         #mul_add_c(a[5],b[3],c3,c1,c2);
1297         $LD     r6,`5*$BNSZ`(r4)
1298         $LD     r7,`3*$BNSZ`(r5)
1299         $UMULL  r8,r6,r7
1300         $UMULH  r9,r6,r7
1301         addc    r12,r12,r8
1302         adde    r10,r10,r9
1303         addze   r11,r11
1304                                         #mul_add_c(a[4],b[4],c3,c1,c2);
1305         $LD     r6,`4*$BNSZ`(r4)
1306         $LD     r7,`4*$BNSZ`(r5)
1307         $UMULL  r8,r6,r7
1308         $UMULH  r9,r6,r7
1309         addc    r12,r12,r8
1310         adde    r10,r10,r9
1311         addze   r11,r11
1312                                         #mul_add_c(a[3],b[5],c3,c1,c2);
1313         $LD     r6,`3*$BNSZ`(r4)
1314         $LD     r7,`5*$BNSZ`(r5)
1315         $UMULL  r8,r6,r7
1316         $UMULH  r9,r6,r7
1317         addc    r12,r12,r8
1318         adde    r10,r10,r9
1319         addze   r11,r11
1320                                         #mul_add_c(a[2],b[6],c3,c1,c2);
1321         $LD     r6,`2*$BNSZ`(r4)
1322         $LD     r7,`6*$BNSZ`(r5)
1323         $UMULL  r8,r6,r7
1324         $UMULH  r9,r6,r7
	addc	r12,r12,r8
	adde	r10,r10,r9
	addze	r11,r11
					#mul_add_c(a[1],b[7],c3,c1,c2);
	$LD	r6,`1*$BNSZ`(r4)
	$LD	r7,`7*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r12,r12,r8
	adde	r10,r10,r9
	addze	r11,r11
	$ST	r12,`8*$BNSZ`(r3)	#r[8]=c3;
					#mul_add_c(a[2],b[7],c1,c2,c3);
	$LD	r6,`2*$BNSZ`(r4)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r0
					#mul_add_c(a[3],b[6],c1,c2,c3);
	$LD	r6,`3*$BNSZ`(r4)
	$LD	r7,`6*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r12
					#mul_add_c(a[4],b[5],c1,c2,c3);
	$LD	r6,`4*$BNSZ`(r4)
	$LD	r7,`5*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r12
					#mul_add_c(a[5],b[4],c1,c2,c3);
	$LD	r6,`5*$BNSZ`(r4)
	$LD	r7,`4*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r12
					#mul_add_c(a[6],b[3],c1,c2,c3);
	$LD	r6,`6*$BNSZ`(r4)
	$LD	r7,`3*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r12
					#mul_add_c(a[7],b[2],c1,c2,c3);
	$LD	r6,`7*$BNSZ`(r4)
	$LD	r7,`2*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r12
	$ST	r10,`9*$BNSZ`(r3)	#r[9]=c1;
					#mul_add_c(a[7],b[3],c2,c3,c1);
	$LD	r7,`3*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r11,r11,r8
	adde	r12,r12,r9
	addze	r10,r0
					#mul_add_c(a[6],b[4],c2,c3,c1);
	$LD	r6,`6*$BNSZ`(r4)
	$LD	r7,`4*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r11,r11,r8
	adde	r12,r12,r9
	addze	r10,r10
					#mul_add_c(a[5],b[5],c2,c3,c1);
	$LD	r6,`5*$BNSZ`(r4)
	$LD	r7,`5*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r11,r11,r8
	adde	r12,r12,r9
	addze	r10,r10
					#mul_add_c(a[4],b[6],c2,c3,c1);
	$LD	r6,`4*$BNSZ`(r4)
	$LD	r7,`6*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r11,r11,r8
	adde	r12,r12,r9
	addze	r10,r10
					#mul_add_c(a[3],b[7],c2,c3,c1);
	$LD	r6,`3*$BNSZ`(r4)
	$LD	r7,`7*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r11,r11,r8
	adde	r12,r12,r9
	addze	r10,r10
	$ST	r11,`10*$BNSZ`(r3)	#r[10]=c2;
					#mul_add_c(a[4],b[7],c3,c1,c2);
	$LD	r6,`4*$BNSZ`(r4)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r12,r12,r8
	adde	r10,r10,r9
	addze	r11,r0
					#mul_add_c(a[5],b[6],c3,c1,c2);
	$LD	r6,`5*$BNSZ`(r4)
	$LD	r7,`6*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r12,r12,r8
	adde	r10,r10,r9
	addze	r11,r11
					#mul_add_c(a[6],b[5],c3,c1,c2);
	$LD	r6,`6*$BNSZ`(r4)
	$LD	r7,`5*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r12,r12,r8
	adde	r10,r10,r9
	addze	r11,r11
					#mul_add_c(a[7],b[4],c3,c1,c2);
	$LD	r6,`7*$BNSZ`(r4)
	$LD	r7,`4*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r12,r12,r8
	adde	r10,r10,r9
	addze	r11,r11
	$ST	r12,`11*$BNSZ`(r3)	#r[11]=c3;
					#mul_add_c(a[7],b[5],c1,c2,c3);
	$LD	r7,`5*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r0
					#mul_add_c(a[6],b[6],c1,c2,c3);
	$LD	r6,`6*$BNSZ`(r4)
	$LD	r7,`6*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r12
					#mul_add_c(a[5],b[7],c1,c2,c3);
	$LD	r6,`5*$BNSZ`(r4)
	$LD	r7,`7*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r10,r10,r8
	adde	r11,r11,r9
	addze	r12,r12
	$ST	r10,`12*$BNSZ`(r3)	#r[12]=c1;
					#mul_add_c(a[6],b[7],c2,c3,c1);
	$LD	r6,`6*$BNSZ`(r4)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r11,r11,r8
	adde	r12,r12,r9
	addze	r10,r0
					#mul_add_c(a[7],b[6],c2,c3,c1);
	$LD	r6,`7*$BNSZ`(r4)
	$LD	r7,`6*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r11,r11,r8
	adde	r12,r12,r9
	addze	r10,r10
	$ST	r11,`13*$BNSZ`(r3)	#r[13]=c2;
					#mul_add_c(a[7],b[7],c3,c1,c2);
	$LD	r7,`7*$BNSZ`(r5)
	$UMULL	r8,r6,r7
	$UMULH	r9,r6,r7
	addc	r12,r12,r8
	adde	r10,r10,r9
	$ST	r12,`14*$BNSZ`(r3)	#r[14]=c3;
	$ST	r10,`15*$BNSZ`(r3)	#r[15]=c1;
	blr
	.long	0x00000000
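#
#	Every mul_add_c(a,b,c1,c2,c3) step annotated above follows one
#	pattern: form the double-word product a*b ($UMULL/$UMULH) and add
#	it into the three-word column accumulator (c1,c2,c3) via the
#	addc/adde/addze chain. A rough C sketch of one such step, assuming
#	a 32-bit word with a 64-bit double word (the typedefs and the
#	helper name are illustrative, not part of this file):
#
#	typedef uint32_t word;  typedef uint64_t dword;
#
#	static void mul_add_c(word a, word b, word *c1, word *c2, word *c3)
#	{
#		dword t  = (dword)a * b;	/* $UMULL + $UMULH  */
#		word  lo = (word)t;
#		word  hi = (word)(t >> 32);
#
#		*c1 += lo;			/* addc             */
#		hi  += (*c1 < lo);		/* carry out of c1  */
#		*c2 += hi;			/* adde             */
#		*c3 += (*c2 < hi);		/* addze            */
#	}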

#
#	NOTE:	The following label name should be changed to
#		"bn_sub_words", i.e. remove the leading dot,
#		for the gcc compiler. This should be done
#		automatically by the build.
#
#
.align	4
.bn_sub_words:
#
#	Handcoded version of bn_sub_words
#
#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
#
#	r3 = r
#	r4 = a
#	r5 = b
#	r6 = n
#
#	Note:	No loop unrolling is done since this is not a
#		performance-critical loop.

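#	A rough C equivalent of the borrow chain used below; BN_ULONG is
#	the machine word, and the temporaries are illustrative assumptions:
#
#	BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
#	{
#		BN_ULONG borrow = 0;		/* CA preset by subfc.  */
#		int i;
#
#		for (i = 0; i < n; i++) {	/* mtctr/bdnz- loop     */
#			BN_ULONG t = a[i], u = b[i];
#			r[i] = t - u - borrow;	/* subfe; alias-safe    */
#			borrow = (t < u) || (borrow && t == u);
#		}
#		return borrow;			/* subfze + andi.       */
#	}
#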
	xor	r0,r0,r0	#set r0 = 0
#
#	check for r6 = 0 AND set carry bit.
#
	subfc.	r7,r0,r6	# If r6 is 0 then the result is 0;
				# if r6 > 0 then the result is != 0.
				# In either case the carry bit gets set.
	beq	Lppcasm_sub_adios
	addi	r4,r4,-$BNSZ
	addi	r3,r3,-$BNSZ
	addi	r5,r5,-$BNSZ
	mtctr	r6
Lppcasm_sub_mainloop:
	$LDU	r7,$BNSZ(r4)
	$LDU	r8,$BNSZ(r5)
	subfe	r6,r8,r7	# r6 = r7 + carry bit + ones-complement(r8);
				# if carry = 1 this is r7-r8, else it is
				# r7-r8-1, exactly as borrow propagation needs.
	$STU	r6,$BNSZ(r3)
	bdnz-	Lppcasm_sub_mainloop
Lppcasm_sub_adios:
	subfze	r3,r0		# if carry bit is set then r3 = 0 else -1
	andi.	r3,r3,1		# keep only the last bit.
	blr
	.long	0x00000000


#
#	NOTE:	The following label name should be changed to
#		"bn_add_words", i.e. remove the leading dot,
#		for the gcc compiler. This should be done
#		automatically by the build.
#

.align	4
.bn_add_words:
#
#	Handcoded version of bn_add_words
#
#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
#
#	r3 = r
#	r4 = a
#	r5 = b
#	r6 = n
#
#	Note:	No loop unrolling is done since this is not a
#		performance-critical loop.

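#	A rough C equivalent of the carry chain used below; the
#	temporaries are illustrative assumptions:
#
#	BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
#	{
#		BN_ULONG carry = 0;		/* CA cleared by addic. */
#		int i;
#
#		for (i = 0; i < n; i++) {	/* mtctr/bdnz- loop     */
#			BN_ULONG t = a[i];
#			BN_ULONG s = t + b[i] + carry;	/* adde         */
#			carry = (s < t) || (carry && s == t);
#			r[i] = s;
#		}
#		return carry;			/* addze r3,r0          */
#	}
#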
	xor	r0,r0,r0
#
#	check for r6 = 0. Is this needed?
#
	addic.	r6,r6,0		#test r6 and clear carry bit.
	beq	Lppcasm_add_adios
	addi	r4,r4,-$BNSZ
	addi	r3,r3,-$BNSZ
	addi	r5,r5,-$BNSZ
	mtctr	r6
Lppcasm_add_mainloop:
	$LDU	r7,$BNSZ(r4)
	$LDU	r8,$BNSZ(r5)
	adde	r8,r7,r8
	$STU	r8,$BNSZ(r3)
	bdnz-	Lppcasm_add_mainloop
Lppcasm_add_adios:
	addze	r3,r0			#return carry bit.
	blr
	.long	0x00000000

#
#	NOTE:	The following label name should be changed to
#		"bn_div_words", i.e. remove the leading dot,
#		for the gcc compiler. This should be done
#		automatically by the build.
#

.align	4
.bn_div_words:
#
#	This is a cleaned-up version of code generated by
#	the AIX compiler. The only optimization is the use of
#	the PPC count-leading-zeros instruction instead of a
#	call to num_bits_word. Since this was compiled at only
#	-O2, it could probably be squeezed further.
#
#	r3 = h
#	r4 = l
#	r5 = d

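#	A rough C sketch of the two-digit schoolbook division performed
#	below, assuming a 32-bit word (BN_BITS2 = 32, BN_BITS4 = 16,
#	BN_MASK2l = 0xffff, BN_MASK2h = 0xffff0000); the variable names
#	are illustrative, and h is assumed to have no bits at or above
#	BN_num_bits_word(d) (the asm traps otherwise):
#
#	BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
#	{
#		BN_ULONG dh, dl, q, th, tl, t, ret = 0;
#		int i, count = 2;
#
#		if (d == 0) return (BN_ULONG)-1;
#		i = BN_BITS2 - BN_num_bits_word(d);	/* $CNTLZ.      */
#		if (h >= d) h -= d;
#		if (i) {		/* normalize d to the top bit   */
#			d <<= i;
#			h = (h << i) | (l >> (BN_BITS2 - i));
#			l <<= i;
#		}
#		dh = d >> BN_BITS4;
#		dl = d & BN_MASK2l;
#		for (;;) {		/* produce two half-word digits */
#			q = ((h >> BN_BITS4) == dh) ? BN_MASK2l : h / dh;
#			th = q * dh;
#			tl = q * dl;
#			for (;;) {	/* correct the overestimate     */
#				t = h - th;
#				if ((t >> BN_BITS4) ||
#				    tl <= ((t << BN_BITS4) |
#					   ((l & BN_MASK2h) >> BN_BITS4)))
#					break;
#				q--; th -= dh; tl -= dl;
#			}
#			t  = tl >> BN_BITS4;
#			tl = tl << BN_BITS4;
#			th += t;
#			if (l < tl) th++;
#			l -= tl;
#			if (h < th) { q--; h += d; }
#			h -= th;
#			if (--count == 0) break;	/* bdz          */
#			ret = q << BN_BITS4;
#			h = (h << BN_BITS4) | (l >> BN_BITS4);
#			l = (l & BN_MASK2l) << BN_BITS4;
#		}
#		return ret | q;
#	}
#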
	$UCMPI	0,r5,0			# compare r5 and 0
	bne	Lppcasm_div1		# proceed if d!=0
	li	r3,-1			# d=0 return -1
	blr
Lppcasm_div1:
	xor	r0,r0,r0		#r0=0
	li	r8,$BITS
	$CNTLZ.	r7,r5			#r7 = num leading 0s in d.
	beq	Lppcasm_div2		#proceed if no leading zeros
	subf	r8,r7,r8		#r8 = BN_num_bits_word(d)
	$SHR.	r9,r3,r8		#are there any bits above r8'th?
	$TR	16,r9,r0		#if there are, signal to dump core...
Lppcasm_div2:
	$UCMP	0,r3,r5			#h>=d?
	blt	Lppcasm_div3		#goto Lppcasm_div3 if not
	subf	r3,r5,r3		#h-=d ;
Lppcasm_div3:				#r7 = BN_BITS2 - BN_num_bits_word(d),
					#i.e. r7 = i
	cmpi	0,0,r7,0		# is (i == 0)?
	beq	Lppcasm_div4
	$SHL	r3,r3,r7		# h = (h<<i)
	$SHR	r8,r4,r8		# r8 = (l >> (BN_BITS2-i))
	$SHL	r5,r5,r7		# d<<=i
	or	r3,r3,r8		# h = (h<<i)|(l>>(BN_BITS2-i))
	$SHL	r4,r4,r7		# l<<=i
Lppcasm_div4:
	$SHRI	r9,r5,`$BITS/2`		# r9 = dh
					# dl will be computed when needed
					# as it saves registers.
	li	r6,2			#r6=2
	mtctr	r6			#loop counter goes in CTR.
Lppcasm_divouterloop: 
	$SHRI	r8,r3,`$BITS/2`		#r8 = (h>>BN_BITS4)
	$SHRI	r11,r4,`$BITS/2`	#r11= (l&BN_MASK2h)>>BN_BITS4
					# compute here for innerloop.
	$UCMP	0,r8,r9			# is (h>>BN_BITS4)==dh?
	bne	Lppcasm_div5		# goto Lppcasm_div5 if not

	li	r8,-1
	$CLRU	r8,r8,`$BITS/2`		#q = BN_MASK2l
	b	Lppcasm_div6
Lppcasm_div5:
	$UDIV	r8,r3,r9		#q = h/dh
Lppcasm_div6:
	$UMULL	r12,r9,r8		#th = q*dh
	$CLRU	r10,r5,`$BITS/2`	#r10=dl
	$UMULL	r6,r8,r10		#tl = q*dl

Lppcasm_divinnerloop:
	subf	r10,r12,r3		#t = h-th
	$SHRI	r7,r10,`$BITS/2`	#r7= (t&BN_MASK2h), sort of...
	addic.	r7,r7,0			#test if r7 == 0. used below.
					# now want to compute
					# r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
					# the following 2 instructions do that
	$SHLI	r7,r10,`$BITS/2`	# r7 = (t<<BN_BITS4)
	or	r7,r7,r11		# r7|=((l&BN_MASK2h)>>BN_BITS4)
	$UCMP	cr1,r6,r7		# is tl <= r7?
	bne	Lppcasm_divinnerexit
	ble	cr1,Lppcasm_divinnerexit
	addi	r8,r8,-1		#q--
	subf	r12,r9,r12		#th -=dh
	$CLRU	r10,r5,`$BITS/2`	#r10=dl. t is no longer needed in loop.
	subf	r6,r10,r6		#tl -=dl
	b	Lppcasm_divinnerloop
Lppcasm_divinnerexit:
	$SHRI	r10,r6,`$BITS/2`	#t=(tl>>BN_BITS4)
	$SHLI	r11,r6,`$BITS/2`	#tl=(tl<<BN_BITS4)&BN_MASK2h;
	$UCMP	cr1,r4,r11		# compare l and tl
	add	r12,r12,r10		# th+=t
	bge	cr1,Lppcasm_div7	# if (l>=tl) goto Lppcasm_div7
	addi	r12,r12,1		# th++
Lppcasm_div7:
	subf	r11,r11,r4		#r11=l-tl
	$UCMP	cr1,r3,r12		#compare h and th
	bge	cr1,Lppcasm_div8	#if (h>=th) goto Lppcasm_div8
	addi	r8,r8,-1		# q--
	add	r3,r5,r3		# h+=d
Lppcasm_div8:
	subf	r12,r12,r3		#r12 = h-th
	$SHLI	r4,r11,`$BITS/2`	#l=(l&BN_MASK2l)<<BN_BITS4
					# want to compute
					# h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
					# the following 2 instructions will do this.
	$INSR	r11,r12,`$BITS/2`,`$BITS/2`	# r11 is the value we want rotated $BITS/2.
	$ROTL	r3,r11,`$BITS/2`	# rotate by $BITS/2 and store in r3
	bdz	Lppcasm_div9		#if (count==0) break ;
	$SHLI	r0,r8,`$BITS/2`		#ret = q<<BN_BITS4
	b	Lppcasm_divouterloop
Lppcasm_div9:
	or	r3,r8,r0
	blr
	.long	0x00000000

#
#	NOTE:	The following label name should be changed to
#		"bn_sqr_words", i.e. remove the leading dot,
#		for the gcc compiler. This should be done
#		automatically by the build.
#
.align	4
.bn_sqr_words:
#
#	Optimized version of bn_sqr_words
#
#	void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
#
#	r3 = r
#	r4 = a
#	r5 = n
#
#	r6 = a[i].
#	r7,r8 = product.
#
#	No unrolling done here. Not a performance-critical loop.

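#	A rough C equivalent, assuming a 32-bit word with a 64-bit
#	double word (the typedefs are illustrative assumptions):
#
#	typedef uint32_t word;  typedef uint64_t dword;
#
#	void bn_sqr_words(word *r, const word *a, int n)
#	{
#		int i;
#		for (i = 0; i < n; i++) {	/* r[] holds 2n words   */
#			dword t = (dword)a[i] * a[i];
#			r[2*i]     = (word)t;		/* $UMULL       */
#			r[2*i + 1] = (word)(t >> 32);	/* $UMULH       */
#		}
#	}
#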
	addic.	r5,r5,0			#test r5.
	beq	Lppcasm_sqr_adios
	addi	r4,r4,-$BNSZ
	addi	r3,r3,-$BNSZ
	mtctr	r5
Lppcasm_sqr_mainloop:
					#sqr(r[0],r[1],a[0]);
	$LDU	r6,$BNSZ(r4)
	$UMULL	r7,r6,r6
	$UMULH	r8,r6,r6
	$STU	r7,$BNSZ(r3)
	$STU	r8,$BNSZ(r3)
	bdnz-	Lppcasm_sqr_mainloop
Lppcasm_sqr_adios:
	blr
	.long	0x00000000


#
#	NOTE:	The following label name should be changed to
#		"bn_mul_words", i.e. remove the leading dot,
#		for the gcc compiler. This should be done
#		automatically by the build.
#

.align	4
.bn_mul_words:
#
# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
#
# r3 = rp
# r4 = ap
# r5 = num
# r6 = w
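#
# A rough C equivalent; the asm below unrolls this by four and keeps the
# carry live in CA between spins instead of rematerializing it each time
# (the typedefs are illustrative assumptions):
#
#	typedef uint32_t word;  typedef uint64_t dword;
#
#	word bn_mul_words(word *rp, const word *ap, int num, word w)
#	{
#		word carry = 0;
#		int i;
#		for (i = 0; i < num; i++) {
#			dword t = (dword)ap[i] * w + carry; /* cannot overflow */
#			rp[i] = (word)t;
#			carry = (word)(t >> 32);
#		}
#		return carry;
#	}
#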
	xor	r0,r0,r0
	xor	r12,r12,r12		# used for carry
	rlwinm.	r7,r5,30,2,31		# num >> 2
	beq	Lppcasm_mw_REM
	mtctr	r7
Lppcasm_mw_LOOP:
					#mul(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	#addze	r10,r10			#carry is NOT ignored.
					#it will be taken care of
					#in the second spin below
					#using adde.
	$ST	r9,`0*$BNSZ`(r3)
					#mul(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	#addze	r12,r12
	$ST	r11,`1*$BNSZ`(r3)
					#mul(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	adde	r9,r9,r12
	#addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
					#mul(rp[3],ap[3],w,c1);
	$LD	r8,`3*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	addze	r12,r12			#this spin we collect the carry
					#into r12
	$ST	r11,`3*$BNSZ`(r3)

	addi	r3,r3,`4*$BNSZ`
	addi	r4,r4,`4*$BNSZ`
	bdnz-	Lppcasm_mw_LOOP

Lppcasm_mw_REM:
	andi.	r5,r5,0x3
	beq	Lppcasm_mw_OVER
					#mul(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`0*$BNSZ`(r3)
	addi	r12,r10,0

	addi	r5,r5,-1
	cmpli	0,0,r5,0
	beq	Lppcasm_mw_OVER

					#mul(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`1*$BNSZ`(r3)
	addi	r12,r10,0

	addi	r5,r5,-1
	cmpli	0,0,r5,0
	beq	Lppcasm_mw_OVER

					#mul(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
	addi	r12,r10,0

Lppcasm_mw_OVER:
	addi	r3,r12,0
	blr
	.long	0x00000000

#
#	NOTE:	The following label name should be changed to
#		"bn_mul_add_words", i.e. remove the leading dot,
#		for the gcc compiler. This should be done
#		automatically by the build.
#

.align	4
.bn_mul_add_words:
#
# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
#
# r3 = rp
# r4 = ap
# r5 = num
# r6 = w
#
# Empirical evidence suggests that the unrolled version performs best.
#
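# A rough C equivalent; note each word needs two carry-producing adds
# (product-low + old carry, then + rp[i]), which is why the asm chains
# addc/adde/addze pairs (the typedefs are illustrative assumptions):
#
#	typedef uint32_t word;  typedef uint64_t dword;
#
#	word bn_mul_add_words(word *rp, const word *ap, int num, word w)
#	{
#		word carry = 0;
#		int i;
#		for (i = 0; i < num; i++) {
#			dword t = (dword)ap[i] * w + rp[i] + carry;
#			rp[i] = (word)t;	/* t <= 2^64-1: no overflow */
#			carry = (word)(t >> 32);
#		}
#		return carry;
#	}
#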
	xor	r0,r0,r0		#r0 = 0
	xor	r12,r12,r12		#r12 = 0. used for carry
	rlwinm.	r7,r5,30,2,31		# num >> 2
	beq	Lppcasm_maw_leftover	# if (num < 4) goto Lppcasm_maw_leftover
	mtctr	r7
Lppcasm_maw_mainloop:
					#mul_add(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$LD	r11,`0*$BNSZ`(r3)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12		#r12 is carry.
	addze	r10,r10
	addc	r9,r9,r11
	#addze	r10,r10
					#the addze above is NOT needed.
					#the carry is NOT lost: multiply
					#does not affect CA, so it is
					#collected by the adde in the
					#next spin.
	$ST	r9,`0*$BNSZ`(r3)

					#mul_add(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$LD	r9,`1*$BNSZ`(r3)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10		#r10 is carry.
	addze	r12,r12
	addc	r11,r11,r9
	#addze	r12,r12
	$ST	r11,`1*$BNSZ`(r3)

					#mul_add(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$LD	r11,`2*$BNSZ`(r3)
	$UMULH	r10,r6,r8
	adde	r9,r9,r12
	addze	r10,r10
	addc	r9,r9,r11
	#addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)

					#mul_add(rp[3],ap[3],w,c1);
	$LD	r8,`3*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$LD	r9,`3*$BNSZ`(r3)
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	addze	r12,r12
	addc	r11,r11,r9
	addze	r12,r12
	$ST	r11,`3*$BNSZ`(r3)
	addi	r3,r3,`4*$BNSZ`
	addi	r4,r4,`4*$BNSZ`
	bdnz-	Lppcasm_maw_mainloop

Lppcasm_maw_leftover:
	andi.	r5,r5,0x3
	beq	Lppcasm_maw_adios
	addi	r3,r3,-$BNSZ
	addi	r4,r4,-$BNSZ
					#mul_add(rp[0],ap[0],w,c1);
	mtctr	r5
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)

	bdz	Lppcasm_maw_adios
					#mul_add(rp[1],ap[1],w,c1);
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)

	bdz	Lppcasm_maw_adios
					#mul_add(rp[2],ap[2],w,c1);
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)

Lppcasm_maw_adios:
	addi	r3,r12,0
	blr
	.long	0x00000000
	.align	4
EOF
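# Expand every back-quoted expression in the template above (e.g.
# `4*$BNSZ` or `$BITS/2`) to a numeric literal before emitting the code.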
$data =~ s/\`([^\`]*)\`/eval $1/gem;
print $data;
close STDOUT;