# Implemented as a Perl wrapper because we want to support several
# different architectures with a single file. We pick the target based
# on the name of the file we are asked to generate.
# It should be noted, though, that this Perl code is nothing like
# <openssl>/crypto/perlasm/x86*. In this case Perl is used pretty much
# as a pre-processor to cover for platform differences in name
# decoration, linker tables, 32-/64-bit instruction sets...
# As you might know, there are several PowerPC ABIs in use. Most
# notably, Linux and AIX use different 32-bit ABIs. The good news is
# that these ABIs are similar enough to permit leaf(!) functions which
# are ABI-neutral. And that's what you find here: ABI-neutral leaf
# functions. In case you wonder what that is...
# MEASUREMENTS WITH cc ON A 200 MHz PowerPC 604e.
# The following is the performance of the 32-bit compiler-generated code:
25 # OpenSSL 0.9.6c 21 dec 2001
26 # built on: Tue Jun 11 11:06:51 EDT 2002
27 # options:bn(64,32) ...
28 #compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
29 # sign verify sign/s verify/s
30 #rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
31 #rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
32 #rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
33 #rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
34 #dsa 512 bits 0.0087s 0.0106s 114.3 94.5
35 #dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
# Same benchmark with this assembler code:
39 #rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
40 #rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
41 #rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
42 #rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
43 #dsa 512 bits 0.0052s 0.0062s 191.6 162.0
44 #dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
# The number of operations per second increases by almost 75%.
# Here are the performance numbers for the 64-bit compiler-generated code:
51 # OpenSSL 0.9.6g [engine] 9 Aug 2002
52 # built on: Fri Apr 18 16:59:20 EDT 2003
53 # options:bn(64,64) ...
54 # compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
55 # sign verify sign/s verify/s
56 #rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
57 #rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
58 #rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
59 #rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
60 #dsa 512 bits 0.0026s 0.0032s 382.5 313.7
61 #dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
63 # Same benchmark with this assembler code:
65 #rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
66 #rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
67 #rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
68 #rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
69 #dsa 512 bits 0.0016s 0.0020s 610.7 507.1
70 #dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
# Again, performance increases by about 75%.
# Mac OS X, Apple G5 1.8GHz (note this is 32-bit code)
75 # OpenSSL 0.9.7c 30 Sep 2003
79 #rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
80 #rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
81 #rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
82 #rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
83 #dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
84 #dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
85 #dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
87 # Same benchmark with this assembler code:
89 #rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
90 #rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
91 #rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
92 #rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
93 #dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
94 #dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
95 #dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
97 # Performance increase of ~60%
# If you have comments or suggestions to improve the code, send
# me a note at schari@us.ibm.com
105 if ($opf =~ /32\.s/) {
111 $LDU= "lwzu"; # load and update
113 $STU= "stwu"; # store and update
114 $UMULL= "mullw"; # unsigned multiply low
115 $UMULH= "mulhwu"; # unsigned multiply high
116 $UDIV= "divwu"; # unsigned divide
117 $UCMPI= "cmplwi"; # unsigned compare with immediate
118 $UCMP= "cmplw"; # unsigned compare
119 $CNTLZ= "cntlzw"; # count leading zeros
120 $SHL= "slw"; # shift left
121 $SHR= "srw"; # unsigned shift right
122 $SHRI= "srwi"; # unsigned shift right by immediate
123 $SHLI= "slwi"; # shift left by immediate
124 $CLRU= "clrlwi"; # clear upper bits
125 $INSR= "insrwi"; # insert right
126 $ROTL= "rotlwi"; # rotate left by immediate
127 $TR= "tw"; # conditional trap
128 } elsif ($opf =~ /64\.s/) {
133 # same as above, but 64-bit mnemonics...
135 $LDU= "ldu"; # load and update
137 $STU= "stdu"; # store and update
138 $UMULL= "mulld"; # unsigned multiply low
139 $UMULH= "mulhdu"; # unsigned multiply high
140 $UDIV= "divdu"; # unsigned divide
141 $UCMPI= "cmpldi"; # unsigned compare with immediate
142 $UCMP= "cmpld"; # unsigned compare
143 $CNTLZ= "cntlzd"; # count leading zeros
144 $SHL= "sld"; # shift left
145 $SHR= "srd"; # unsigned shift right
146 $SHRI= "srdi"; # unsigned shift right by immediate
147 $SHLI= "sldi"; # shift left by immediate
148 $CLRU= "clrldi"; # clear upper bits
149 $INSR= "insrdi"; # insert right
150 $ROTL= "rotldi"; # rotate left by immediate
151 $TR= "td"; # conditional trap
152 } else { die "nonsense $opf"; }
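
# For illustration only (hypothetical invocations; the build system
# normally supplies the output file name), the mnemonic set above is
# chosen purely from the name of the file we are asked to produce:
#
#	perl ppc.pl aix_ppc32.s		# matches /32\.s/ -> lwz/mullw/... set
#	perl ppc.pl linux_ppc64.s	# matches /64\.s/ -> ld/mulld/...  set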
154 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
155 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
156 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
157 die "can't locate ppc-xlate.pl";
159 ( defined shift || open STDOUT,"| $^X $xlate $opf" ) ||
160 die "can't call $xlate: $!";
163 #--------------------------------------------------------------------
170 # Created by: Suresh Chari
171 # IBM Thomas J. Watson Research Library
175 # Description: Optimized assembly routines for OpenSSL crypto
#		on the 32-bit PowerPC platform.
#	2. Fixed bn_add, bn_sub and bn_div_words, added comments,
#	   cleaned up the code. Also made a single version which can
#	   be used for both the AIX and Linux compilers. See NOTE
#	   below.
185 # 12/05/03 Suresh Chari
186 # (with lots of help from) Andy Polyakov
188 # 1. Initial version 10/20/02 Suresh Chari
#	The following file works for the xlc, cc and gcc compilers.
194 # NOTE: To get the file to link correctly with the gcc compiler
195 # you have to change the names of the routines and remove
#	the first . (dot) character. This should automatically
197 # be done in the build process.
#	Hand-optimized assembly code for the following routines:
212 # NOTE: It is possible to optimize this code more for
213 # specific PowerPC or Power architectures. On the Northstar
214 # architecture the optimizations in this file do
215 # NOT provide much improvement.
#	If you have comments or suggestions to improve the code, send
#	me a note at schari\@us.ibm.com
220 #--------------------------------------------------------------------------
222 # Defines to be used in the assembly code.
224 #.set r0,0 # we use it as storage for value of 0
225 #.set SP,1 # preserved
226 #.set RTOC,2 # preserved
227 #.set r3,3 # 1st argument/return value
228 #.set r4,4 # 2nd argument/volatile register
229 #.set r5,5 # 3rd argument/volatile register
237 #.set r13,13 # not used, nor any other "below" it...
239 # Declare function names to be global
240 # NOTE: For gcc these names MUST be changed to remove
241 # the first . i.e. for example change ".bn_sqr_comba4"
#	to "bn_sqr_comba4". This should be automatically done
#	in the build process.
245 .globl .bn_sqr_comba4
246 .globl .bn_sqr_comba8
247 .globl .bn_mul_comba4
248 .globl .bn_mul_comba8
254 .globl .bn_mul_add_words
261 # NOTE: The following label name should be changed to
262 # "bn_sqr_comba4" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
270 # Optimized version of bn_sqr_comba4.
272 # void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
276 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
278 # r5,r6 are the two BN_ULONGs being multiplied.
#	r7,r8 hold the result of the 32x32-to-64-bit multiply.
#	r9,r10,r11 are the equivalents of c1,c2,c3.
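#
#	For reference, the macros unrolled below behave like this C
#	sketch (semantics only; the exact OpenSSL macro definitions
#	live in the BN headers and may differ in detail):
#
#	    /* sqr_add_c(a,i,c1,c2,c3):    (c3,c2,c1) += a[i]*a[i]   */
#	    /* sqr_add_c2(a,i,j,c1,c2,c3): (c3,c2,c1) += 2*a[i]*a[j] */
#
#	i.e. the low/high halves of each product are added into the
#	three-word accumulator, propagating carries upward.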
281 # Here's the assembly
	xor	r0,r0,r0		# set r0 = 0. Used in the addze instructions below.
287 #sqr_add_c(a,0,c1,c2,c3)
290 $UMULH r10,r5,r5 #in first iteration. No need
291 #to add since c1=c2=c3=0.
292 # Note c3(r11) is NOT set to 0
295 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
296 # sqr_add_c2(a,1,0,c2,c3,c1);
301 addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
303 addze r9,r0 # catch carry if any.
304 # r9= r0(=0) and carry
306 addc r10,r7,r10 # now add to temp result.
307 addze r11,r8 # r8 added to r11 which is 0
310 $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
311 #sqr_add_c(a,1,c3,c1,c2)
317 #sqr_add_c2(a,2,0,c3,c1,c2)
329 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
330 #sqr_add_c2(a,3,0,c1,c2,c3);
341 #sqr_add_c2(a,2,1,c1,c2,c3);
353 $ST r9,`3*$BNSZ`(r3) #r[3]=c1
354 #sqr_add_c(a,2,c2,c3,c1);
360 #sqr_add_c2(a,3,1,c2,c3,c1);
371 $ST r10,`4*$BNSZ`(r3) #r[4]=c2
372 #sqr_add_c2(a,3,2,c3,c1,c2);
383 $ST r11,`5*$BNSZ`(r3) #r[5] = c3
384 #sqr_add_c(a,3,c1,c2,c3);
390 $ST r9,`6*$BNSZ`(r3) #r[6]=c1
391 $ST r10,`7*$BNSZ`(r3) #r[7]=c2
396 # NOTE: The following label name should be changed to
397 # "bn_sqr_comba8" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
# This is an optimized version of the bn_sqr_comba8 routine.
# It makes heavy use of the adde instruction.
409 # void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
413 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
415 # r5,r6 are the two BN_ULONGs being multiplied.
#	r7,r8 hold the result of the 32x32-to-64-bit multiply.
#	r9,r10,r11 are the equivalents of c1,c2,c3.
# The possible optimization of loading all 8 longs of a into registers
# doesn't provide any speedup.
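#
# The comba technique computes one output word per "column": r[k]
# accumulates every product a[i]*a[j] with i+j == k, plus carries from
# the lower columns. As a sketch of the schedule below:
#
#     r[0]  = a[0]^2
#     r[1]  = 2*a[0]*a[1]
#     r[2]  = 2*a[0]*a[2] + a[1]^2
#     ...
#     r[14] = a[7]^2 (plus carry), r[15] = final carry word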
	xor	r0,r0,r0		#set r0 = 0. Used in addze instructions below.
426 #sqr_add_c(a,0,c1,c2,c3);
428 $UMULL r9,r5,r5 #1st iteration: no carries.
430 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
431 #sqr_add_c2(a,1,0,c2,c3,c1);
	addc	r10,r7,r10	# add the two-register number
	adde	r11,r8,r0	# (r8,r7) to the three-register
	addze	r9,r0		# number (r9,r11,r10). NOTE: r0=0
	addc	r10,r7,r10	# add the two-register number
	adde	r11,r8,r11	# (r8,r7) to the three-register
	addze	r9,r9		# number (r9,r11,r10).
444 $ST r10,`1*$BNSZ`(r3) # r[1]=c2
446 #sqr_add_c(a,1,c3,c1,c2);
452 #sqr_add_c2(a,2,0,c3,c1,c2);
465 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
466 #sqr_add_c2(a,3,0,c1,c2,c3);
467 $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
478 #sqr_add_c2(a,2,1,c1,c2,c3);
492 $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
493 #sqr_add_c(a,2,c2,c3,c1);
500 #sqr_add_c2(a,3,1,c2,c3,c1);
512 #sqr_add_c2(a,4,0,c2,c3,c1);
525 $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
526 #sqr_add_c2(a,5,0,c3,c1,c2);
538 #sqr_add_c2(a,4,1,c3,c1,c2);
551 #sqr_add_c2(a,3,2,c3,c1,c2);
564 $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
565 #sqr_add_c(a,3,c1,c2,c3);
571 #sqr_add_c2(a,4,2,c1,c2,c3);
583 #sqr_add_c2(a,5,1,c1,c2,c3);
596 #sqr_add_c2(a,6,0,c1,c2,c3);
607 $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
608 #sqr_add_c2(a,7,0,c2,c3,c1);
619 #sqr_add_c2(a,6,1,c2,c3,c1);
631 #sqr_add_c2(a,5,2,c2,c3,c1);
642 #sqr_add_c2(a,4,3,c2,c3,c1);
654 $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
655 #sqr_add_c(a,4,c3,c1,c2);
661 #sqr_add_c2(a,5,3,c3,c1,c2);
671 #sqr_add_c2(a,6,2,c3,c1,c2);
683 #sqr_add_c2(a,7,1,c3,c1,c2);
694 $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
695 #sqr_add_c2(a,7,2,c1,c2,c3);
706 #sqr_add_c2(a,6,3,c1,c2,c3);
717 #sqr_add_c2(a,5,4,c1,c2,c3);
728 $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
729 #sqr_add_c(a,5,c2,c3,c1);
735 #sqr_add_c2(a,6,4,c2,c3,c1);
745 #sqr_add_c2(a,7,3,c2,c3,c1);
756 $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
757 #sqr_add_c2(a,7,4,c3,c1,c2);
767 #sqr_add_c2(a,6,5,c3,c1,c2);
778 $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
779 #sqr_add_c(a,6,c1,c2,c3);
785 #sqr_add_c2(a,7,5,c1,c2,c3)
795 $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
797 #sqr_add_c2(a,7,6,c2,c3,c1)
807 $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
808 #sqr_add_c(a,7,c3,c1,c2);
813 $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
814 $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
822 # NOTE: The following label name should be changed to
823 # "bn_mul_comba4" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
831 # This is an optimized version of the bn_mul_comba4 routine.
833 # void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
837 # r6, r7 are the 2 BN_ULONGs being multiplied.
#	r8, r9 hold the result of the 32x32-to-64-bit multiply.
839 # r10, r11, r12 are the equivalents of c1, c2, and c3.
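#
#	For reference, the macro unrolled below behaves like this C
#	sketch (semantics only; the exact OpenSSL definition may
#	differ in detail):
#
#	    /* mul_add_c(a,b,c1,c2,c3): (c3,c2,c1) += a*b, i.e.   */
#	    /*     t   = (double-width) a * b;                    */
#	    /*     c1 += Lo(t); c2 += Hi(t) + carry; c3 += carry; */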
841 xor r0,r0,r0 #r0=0. Used in addze below.
842 #mul_add_c(a[0],b[0],c1,c2,c3);
847 $ST r10,`0*$BNSZ`(r3) #r[0]=c1
848 #mul_add_c(a[0],b[1],c2,c3,c1);
855 #mul_add_c(a[1],b[0],c2,c3,c1);
856 $LD r6, `1*$BNSZ`(r4)
857 $LD r7, `0*$BNSZ`(r5)
863 $ST r11,`1*$BNSZ`(r3) #r[1]=c2
864 #mul_add_c(a[2],b[0],c3,c1,c2);
871 #mul_add_c(a[1],b[1],c3,c1,c2);
879 #mul_add_c(a[0],b[2],c3,c1,c2);
887 $ST r12,`2*$BNSZ`(r3) #r[2]=c3
888 #mul_add_c(a[0],b[3],c1,c2,c3);
895 #mul_add_c(a[1],b[2],c1,c2,c3);
903 #mul_add_c(a[2],b[1],c1,c2,c3);
911 #mul_add_c(a[3],b[0],c1,c2,c3);
919 $ST r10,`3*$BNSZ`(r3) #r[3]=c1
920 #mul_add_c(a[3],b[1],c2,c3,c1);
927 #mul_add_c(a[2],b[2],c2,c3,c1);
935 #mul_add_c(a[1],b[3],c2,c3,c1);
943 $ST r11,`4*$BNSZ`(r3) #r[4]=c2
944 #mul_add_c(a[2],b[3],c3,c1,c2);
951 #mul_add_c(a[3],b[2],c3,c1,c2);
959 $ST r12,`5*$BNSZ`(r3) #r[5]=c3
960 #mul_add_c(a[3],b[3],c1,c2,c3);
967 $ST r10,`6*$BNSZ`(r3) #r[6]=c1
968 $ST r11,`7*$BNSZ`(r3) #r[7]=c2
973 # NOTE: The following label name should be changed to
974 # "bn_mul_comba8" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
982 # Optimized version of the bn_mul_comba8 routine.
984 # void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
988 # r6, r7 are the 2 BN_ULONGs being multiplied.
#	r8, r9 hold the result of the 32x32-to-64-bit multiply.
990 # r10, r11, r12 are the equivalents of c1, c2, and c3.
992 xor r0,r0,r0 #r0=0. Used in addze below.
994 #mul_add_c(a[0],b[0],c1,c2,c3);
995 $LD r6,`0*$BNSZ`(r4) #a[0]
996 $LD r7,`0*$BNSZ`(r5) #b[0]
999 $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
1000 #mul_add_c(a[0],b[1],c2,c3,c1);
1001 $LD r7,`1*$BNSZ`(r5)
	addze	r12,r9		# since we didn't set r12 to zero before.
1007 #mul_add_c(a[1],b[0],c2,c3,c1);
1008 $LD r6,`1*$BNSZ`(r4)
1009 $LD r7,`0*$BNSZ`(r5)
1015 $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
1016 #mul_add_c(a[2],b[0],c3,c1,c2);
1017 $LD r6,`2*$BNSZ`(r4)
1023 #mul_add_c(a[1],b[1],c3,c1,c2);
1024 $LD r6,`1*$BNSZ`(r4)
1025 $LD r7,`1*$BNSZ`(r5)
1031 #mul_add_c(a[0],b[2],c3,c1,c2);
1032 $LD r6,`0*$BNSZ`(r4)
1033 $LD r7,`2*$BNSZ`(r5)
1039 $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
1040 #mul_add_c(a[0],b[3],c1,c2,c3);
1041 $LD r7,`3*$BNSZ`(r5)
1047 #mul_add_c(a[1],b[2],c1,c2,c3);
1048 $LD r6,`1*$BNSZ`(r4)
1049 $LD r7,`2*$BNSZ`(r5)
1056 #mul_add_c(a[2],b[1],c1,c2,c3);
1057 $LD r6,`2*$BNSZ`(r4)
1058 $LD r7,`1*$BNSZ`(r5)
1064 #mul_add_c(a[3],b[0],c1,c2,c3);
1065 $LD r6,`3*$BNSZ`(r4)
1066 $LD r7,`0*$BNSZ`(r5)
1072 $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
1073 #mul_add_c(a[4],b[0],c2,c3,c1);
1074 $LD r6,`4*$BNSZ`(r4)
1080 #mul_add_c(a[3],b[1],c2,c3,c1);
1081 $LD r6,`3*$BNSZ`(r4)
1082 $LD r7,`1*$BNSZ`(r5)
1088 #mul_add_c(a[2],b[2],c2,c3,c1);
1089 $LD r6,`2*$BNSZ`(r4)
1090 $LD r7,`2*$BNSZ`(r5)
1096 #mul_add_c(a[1],b[3],c2,c3,c1);
1097 $LD r6,`1*$BNSZ`(r4)
1098 $LD r7,`3*$BNSZ`(r5)
1104 #mul_add_c(a[0],b[4],c2,c3,c1);
1105 $LD r6,`0*$BNSZ`(r4)
1106 $LD r7,`4*$BNSZ`(r5)
1112 $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
1113 #mul_add_c(a[0],b[5],c3,c1,c2);
1114 $LD r7,`5*$BNSZ`(r5)
1120 #mul_add_c(a[1],b[4],c3,c1,c2);
1121 $LD r6,`1*$BNSZ`(r4)
1122 $LD r7,`4*$BNSZ`(r5)
1128 #mul_add_c(a[2],b[3],c3,c1,c2);
1129 $LD r6,`2*$BNSZ`(r4)
1130 $LD r7,`3*$BNSZ`(r5)
1136 #mul_add_c(a[3],b[2],c3,c1,c2);
1137 $LD r6,`3*$BNSZ`(r4)
1138 $LD r7,`2*$BNSZ`(r5)
1144 #mul_add_c(a[4],b[1],c3,c1,c2);
1145 $LD r6,`4*$BNSZ`(r4)
1146 $LD r7,`1*$BNSZ`(r5)
1152 #mul_add_c(a[5],b[0],c3,c1,c2);
1153 $LD r6,`5*$BNSZ`(r4)
1154 $LD r7,`0*$BNSZ`(r5)
1160 $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
1161 #mul_add_c(a[6],b[0],c1,c2,c3);
1162 $LD r6,`6*$BNSZ`(r4)
1168 #mul_add_c(a[5],b[1],c1,c2,c3);
1169 $LD r6,`5*$BNSZ`(r4)
1170 $LD r7,`1*$BNSZ`(r5)
1176 #mul_add_c(a[4],b[2],c1,c2,c3);
1177 $LD r6,`4*$BNSZ`(r4)
1178 $LD r7,`2*$BNSZ`(r5)
1184 #mul_add_c(a[3],b[3],c1,c2,c3);
1185 $LD r6,`3*$BNSZ`(r4)
1186 $LD r7,`3*$BNSZ`(r5)
1192 #mul_add_c(a[2],b[4],c1,c2,c3);
1193 $LD r6,`2*$BNSZ`(r4)
1194 $LD r7,`4*$BNSZ`(r5)
1200 #mul_add_c(a[1],b[5],c1,c2,c3);
1201 $LD r6,`1*$BNSZ`(r4)
1202 $LD r7,`5*$BNSZ`(r5)
1208 #mul_add_c(a[0],b[6],c1,c2,c3);
1209 $LD r6,`0*$BNSZ`(r4)
1210 $LD r7,`6*$BNSZ`(r5)
1216 $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
1217 #mul_add_c(a[0],b[7],c2,c3,c1);
1218 $LD r7,`7*$BNSZ`(r5)
1224 #mul_add_c(a[1],b[6],c2,c3,c1);
1225 $LD r6,`1*$BNSZ`(r4)
1226 $LD r7,`6*$BNSZ`(r5)
1232 #mul_add_c(a[2],b[5],c2,c3,c1);
1233 $LD r6,`2*$BNSZ`(r4)
1234 $LD r7,`5*$BNSZ`(r5)
1240 #mul_add_c(a[3],b[4],c2,c3,c1);
1241 $LD r6,`3*$BNSZ`(r4)
1242 $LD r7,`4*$BNSZ`(r5)
1248 #mul_add_c(a[4],b[3],c2,c3,c1);
1249 $LD r6,`4*$BNSZ`(r4)
1250 $LD r7,`3*$BNSZ`(r5)
1256 #mul_add_c(a[5],b[2],c2,c3,c1);
1257 $LD r6,`5*$BNSZ`(r4)
1258 $LD r7,`2*$BNSZ`(r5)
1264 #mul_add_c(a[6],b[1],c2,c3,c1);
1265 $LD r6,`6*$BNSZ`(r4)
1266 $LD r7,`1*$BNSZ`(r5)
1272 #mul_add_c(a[7],b[0],c2,c3,c1);
1273 $LD r6,`7*$BNSZ`(r4)
1274 $LD r7,`0*$BNSZ`(r5)
1280 $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
1281 #mul_add_c(a[7],b[1],c3,c1,c2);
1282 $LD r7,`1*$BNSZ`(r5)
1288 #mul_add_c(a[6],b[2],c3,c1,c2);
1289 $LD r6,`6*$BNSZ`(r4)
1290 $LD r7,`2*$BNSZ`(r5)
1296 #mul_add_c(a[5],b[3],c3,c1,c2);
1297 $LD r6,`5*$BNSZ`(r4)
1298 $LD r7,`3*$BNSZ`(r5)
1304 #mul_add_c(a[4],b[4],c3,c1,c2);
1305 $LD r6,`4*$BNSZ`(r4)
1306 $LD r7,`4*$BNSZ`(r5)
1312 #mul_add_c(a[3],b[5],c3,c1,c2);
1313 $LD r6,`3*$BNSZ`(r4)
1314 $LD r7,`5*$BNSZ`(r5)
1320 #mul_add_c(a[2],b[6],c3,c1,c2);
1321 $LD r6,`2*$BNSZ`(r4)
1322 $LD r7,`6*$BNSZ`(r5)
1328 #mul_add_c(a[1],b[7],c3,c1,c2);
1329 $LD r6,`1*$BNSZ`(r4)
1330 $LD r7,`7*$BNSZ`(r5)
1336 $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
1337 #mul_add_c(a[2],b[7],c1,c2,c3);
1338 $LD r6,`2*$BNSZ`(r4)
1344 #mul_add_c(a[3],b[6],c1,c2,c3);
1345 $LD r6,`3*$BNSZ`(r4)
1346 $LD r7,`6*$BNSZ`(r5)
1352 #mul_add_c(a[4],b[5],c1,c2,c3);
1353 $LD r6,`4*$BNSZ`(r4)
1354 $LD r7,`5*$BNSZ`(r5)
1360 #mul_add_c(a[5],b[4],c1,c2,c3);
1361 $LD r6,`5*$BNSZ`(r4)
1362 $LD r7,`4*$BNSZ`(r5)
1368 #mul_add_c(a[6],b[3],c1,c2,c3);
1369 $LD r6,`6*$BNSZ`(r4)
1370 $LD r7,`3*$BNSZ`(r5)
1376 #mul_add_c(a[7],b[2],c1,c2,c3);
1377 $LD r6,`7*$BNSZ`(r4)
1378 $LD r7,`2*$BNSZ`(r5)
1384 $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
1385 #mul_add_c(a[7],b[3],c2,c3,c1);
1386 $LD r7,`3*$BNSZ`(r5)
1392 #mul_add_c(a[6],b[4],c2,c3,c1);
1393 $LD r6,`6*$BNSZ`(r4)
1394 $LD r7,`4*$BNSZ`(r5)
1400 #mul_add_c(a[5],b[5],c2,c3,c1);
1401 $LD r6,`5*$BNSZ`(r4)
1402 $LD r7,`5*$BNSZ`(r5)
1408 #mul_add_c(a[4],b[6],c2,c3,c1);
1409 $LD r6,`4*$BNSZ`(r4)
1410 $LD r7,`6*$BNSZ`(r5)
1416 #mul_add_c(a[3],b[7],c2,c3,c1);
1417 $LD r6,`3*$BNSZ`(r4)
1418 $LD r7,`7*$BNSZ`(r5)
1424 $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
1425 #mul_add_c(a[4],b[7],c3,c1,c2);
1426 $LD r6,`4*$BNSZ`(r4)
1432 #mul_add_c(a[5],b[6],c3,c1,c2);
1433 $LD r6,`5*$BNSZ`(r4)
1434 $LD r7,`6*$BNSZ`(r5)
1440 #mul_add_c(a[6],b[5],c3,c1,c2);
1441 $LD r6,`6*$BNSZ`(r4)
1442 $LD r7,`5*$BNSZ`(r5)
1448 #mul_add_c(a[7],b[4],c3,c1,c2);
1449 $LD r6,`7*$BNSZ`(r4)
1450 $LD r7,`4*$BNSZ`(r5)
1456 $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
1457 #mul_add_c(a[7],b[5],c1,c2,c3);
1458 $LD r7,`5*$BNSZ`(r5)
1464 #mul_add_c(a[6],b[6],c1,c2,c3);
1465 $LD r6,`6*$BNSZ`(r4)
1466 $LD r7,`6*$BNSZ`(r5)
1472 #mul_add_c(a[5],b[7],c1,c2,c3);
1473 $LD r6,`5*$BNSZ`(r4)
1474 $LD r7,`7*$BNSZ`(r5)
1480 $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
1481 #mul_add_c(a[6],b[7],c2,c3,c1);
1482 $LD r6,`6*$BNSZ`(r4)
1488 #mul_add_c(a[7],b[6],c2,c3,c1);
1489 $LD r6,`7*$BNSZ`(r4)
1490 $LD r7,`6*$BNSZ`(r5)
1496 $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
1497 #mul_add_c(a[7],b[7],c3,c1,c2);
1498 $LD r7,`7*$BNSZ`(r5)
1503 $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
1504 $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
1509 # NOTE: The following label name should be changed to
1510 # "bn_sub_words" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
# Hand-coded version of bn_sub_words
1520 #BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
# Note: No loop unrolling done, since this is not a performance-critical function.
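#
# For reference, the equivalent logic as a C sketch (semantics only;
# the borrow returned is 0 or 1):
#
#     c = 0;
#     for (i = 0; i < n; i++) {
#         t1 = a[i]; t2 = b[i];
#         r[i] = t1 - t2 - c;               /* modulo the word size */
#         c = (t1 < t2) || (c && t1 == t2);
#     }
#     return c;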
1530 xor r0,r0,r0 #set r0 = 0
1532 # check for r6 = 0 AND set carry bit.
1534 subfc. r7,r0,r6 # If r6 is 0 then result is 0.
1535 # if r6 > 0 then result !=0
1536 # In either case carry bit is set.
1537 beq Lppcasm_sub_adios
1542 Lppcasm_sub_mainloop:
	subfe	r6,r8,r7	# r6 = r7 + carry bit + ones-complement(r8)
				# if carry = 1 this is r7-r8; otherwise
				# it is r7-r8-1, as we need.
1549 bdnz- Lppcasm_sub_mainloop
1551 subfze r3,r0 # if carry bit is set then r3 = 0 else -1
1552 andi. r3,r3,1 # keep only last bit.
1558 # NOTE: The following label name should be changed to
1559 # "bn_add_words" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
# Hand-coded version of bn_add_words
1569 #BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
# Note: No loop unrolling done, since this is not a performance-critical function.
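#
# For reference, the equivalent logic as a C sketch (semantics only;
# the carry returned is 0 or 1):
#
#     c = 0;
#     for (i = 0; i < n; i++) {
#         t = a[i] + c;  c = (t < c);       /* modulo the word size */
#         r[i] = t + b[i];
#         c += (r[i] < t);
#     }
#     return c;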
1581 # check for r6 = 0. Is this needed?
1583 addic. r6,r6,0 #test r6 and clear carry bit.
1584 beq Lppcasm_add_adios
1589 Lppcasm_add_mainloop:
1594 bdnz- Lppcasm_add_mainloop
1596 addze r3,r0 #return carry bit.
1601 # NOTE: The following label name should be changed to
1602 # "bn_div_words" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
#	This is a cleaned-up version of code generated by
#	the AIX compiler. The only optimization is to use
#	the PPC count-leading-zeros instruction instead of
#	a call to num_bits_word. Since the original was
#	compiled only at -O2, it can probably be squeezed
#	further.
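#
#	The intended semantics, as a C sketch (this is the standard BN
#	primitive; BN_BITS2 is the word size in bits): divide the
#	double-word value (h,l) by d and return the one-word quotient:
#
#	    BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
#	        /* returns ((h << BN_BITS2) | l) / d */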
1620 $UCMPI 0,r5,0 # compare r5 and 0
1621 bne Lppcasm_div1 # proceed if d!=0
1622 li r3,-1 # d=0 return -1
1627 $CNTLZ. r7,r5 #r7 = num leading 0s in d.
1628 beq Lppcasm_div2 #proceed if no leading zeros
1629 subf r8,r7,r8 #r8 = BN_num_bits_word(d)
1630 $SHR. r9,r3,r8 #are there any bits above r8'th?
	$TR	16,r9,r0		#if there are, signal to dump core...
1633 $UCMP 0,r3,r5 #h>=d?
1634 blt Lppcasm_div3 #goto Lppcasm_div3 if not
1635 subf r3,r5,r3 #h-=d ;
1636 Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
1637 cmpi 0,0,r7,0 # is (i == 0)?
1639 $SHL r3,r3,r7 # h = (h<< i)
1640 $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
1641 $SHL r5,r5,r7 # d<<=i
1642 or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
1643 $SHL r4,r4,r7 # l <<=i
1645 $SHRI r9,r5,`$BITS/2` # r9 = dh
1646 # dl will be computed when needed
1647 # as it saves registers.
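#
#	The outer loop below is schoolbook long division in base
#	2^($BITS/2): each pass estimates a half-word quotient digit
#	q = h/dh, then the inner loop decrements q (adjusting th = q*dh
#	and tl = q*dl incrementally) until the trial remainder fits.
#	Roughly, as a sketch:
#
#	    while (q*dl > (((h - q*dh) << $BITS/2) | next half-word of l))
#	        q--;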
	mtctr	r6		#counter will be in the CTR register.
1650 Lppcasm_divouterloop:
1651 $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
1652 $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
1653 # compute here for innerloop.
1654 $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
1655 bne Lppcasm_div5 # goto Lppcasm_div5 if not
1658 $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
1661 $UDIV r8,r3,r9 #q = h/dh
1663 $UMULL r12,r9,r8 #th = q*dh
1664 $CLRU r10,r5,`$BITS/2` #r10=dl
1665 $UMULL r6,r8,r10 #tl = q*dl
1667 Lppcasm_divinnerloop:
1668 subf r10,r12,r3 #t = h -th
1669 $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
1670 addic. r7,r7,0 #test if r7 == 0. used below.
1671 # now want to compute
1672 # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
1673 # the following 2 instructions do that
1674 $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
1675 or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
1676 $UCMP cr1,r6,r7 # compare (tl <= r7)
1677 bne Lppcasm_divinnerexit
1678 ble cr1,Lppcasm_divinnerexit
1680 subf r12,r9,r12 #th -=dh
1681 $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
1682 subf r6,r10,r6 #tl -=dl
1683 b Lppcasm_divinnerloop
1684 Lppcasm_divinnerexit:
1685 $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
1686 $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
1687 $UCMP cr1,r4,r11 # compare l and tl
1688 add r12,r12,r10 # th+=t
1689 bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
1690 addi r12,r12,1 # th++
1692 subf r11,r11,r4 #r11=l-tl
1693 $UCMP cr1,r3,r12 #compare h and th
1694 bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
1698 subf r12,r12,r3 #r12 = h-th
1699 $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
1701 # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
1702 # the following 2 instructions will do this.
1703 $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
1704 $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
1705 bdz Lppcasm_div9 #if (count==0) break ;
1706 $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
1707 b Lppcasm_divouterloop
1714 # NOTE: The following label name should be changed to
1715 # "bn_sqr_words" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
1722 # Optimized version of bn_sqr_words
1724 # void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
1733 # No unrolling done here. Not performance critical.
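#
# For reference, the equivalent logic as a C sketch (semantics only):
# each input word is squared into a double-width result, low word
# first:
#
#     for (i = 0; i < n; i++) {
#         t        = (double-width) a[i] * a[i];
#         r[2*i]   = Lo(t);
#         r[2*i+1] = Hi(t);
#     }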
1735 addic. r5,r5,0 #test r5.
1736 beq Lppcasm_sqr_adios
1740 Lppcasm_sqr_mainloop:
1741 #sqr(r[0],r[1],a[0]);
1747 bdnz- Lppcasm_sqr_mainloop
1754 # NOTE: The following label name should be changed to
1755 # "bn_mul_words" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
1763 # BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
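#
# For reference, the equivalent logic as a C sketch (semantics only):
# multiply the vector ap[] by the single word w and return the final
# carry:
#
#     c = 0;
#     for (i = 0; i < num; i++) {
#         t     = (double-width) ap[i] * w + c;
#         rp[i] = Lo(t);
#         c     = Hi(t);
#     }
#     return c;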
1770 xor r12,r12,r12 # used for carry
1771 rlwinm. r7,r5,30,2,31 # num >> 2
1775 #mul(rp[0],ap[0],w,c1);
1776 $LD r8,`0*$BNSZ`(r4)
1780 #addze r10,r10 #carry is NOT ignored.
1781 #will be taken care of
1782 #in second spin below
1784 $ST r9,`0*$BNSZ`(r3)
1785 #mul(rp[1],ap[1],w,c1);
1786 $LD r8,`1*$BNSZ`(r4)
1791 $ST r11,`1*$BNSZ`(r3)
1792 #mul(rp[2],ap[2],w,c1);
1793 $LD r8,`2*$BNSZ`(r4)
1798 $ST r9,`2*$BNSZ`(r3)
1799 #mul_add(rp[3],ap[3],w,c1);
1800 $LD r8,`3*$BNSZ`(r4)
	addze	r12,r12		#this spin we collect the carry into r12
1806 $ST r11,`3*$BNSZ`(r3)
1808 addi r3,r3,`4*$BNSZ`
1809 addi r4,r4,`4*$BNSZ`
1810 bdnz- Lppcasm_mw_LOOP
1815 #mul(rp[0],ap[0],w,c1);
1816 $LD r8,`0*$BNSZ`(r4)
1821 $ST r9,`0*$BNSZ`(r3)
1829 #mul(rp[1],ap[1],w,c1);
1830 $LD r8,`1*$BNSZ`(r4)
1835 $ST r9,`1*$BNSZ`(r3)
1842 #mul_add(rp[2],ap[2],w,c1);
1843 $LD r8,`2*$BNSZ`(r4)
1848 $ST r9,`2*$BNSZ`(r3)
1857 # NOTE: The following label name should be changed to
1858 # "bn_mul_add_words" i.e. remove the first dot
#	for the gcc compiler. This should be automatically
#	done in the build process.
1866 # BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
# Empirical evidence suggests that the unrolled version performs best.
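#
# For reference, the equivalent logic as a C sketch (semantics only):
# multiply-and-accumulate ap[] into rp[] and return the final carry:
#
#     c = 0;
#     for (i = 0; i < num; i++) {
#         t     = (double-width) ap[i] * w + c + rp[i];
#         rp[i] = Lo(t);
#         c     = Hi(t);
#     }
#     return c;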
1875 xor r0,r0,r0 #r0 = 0
1876 xor r12,r12,r12 #r12 = 0 . used for carry
1877 rlwinm. r7,r5,30,2,31 # num >> 2
	beq	Lppcasm_maw_leftover	# if (num < 4) goto Lppcasm_maw_leftover
1880 Lppcasm_maw_mainloop:
1881 #mul_add(rp[0],ap[0],w,c1);
1882 $LD r8,`0*$BNSZ`(r4)
1883 $LD r11,`0*$BNSZ`(r3)
1886 addc r9,r9,r12 #r12 is carry.
1890 #the above instruction addze
1891 #is NOT needed. Carry will NOT
1892 #be ignored. It's not affected
				#by multiply and will be collected in the next spin.
1895 $ST r9,`0*$BNSZ`(r3)
1897 #mul_add(rp[1],ap[1],w,c1);
1898 $LD r8,`1*$BNSZ`(r4)
1899 $LD r9,`1*$BNSZ`(r3)
1902 adde r11,r11,r10 #r10 is carry.
1906 $ST r11,`1*$BNSZ`(r3)
1908 #mul_add(rp[2],ap[2],w,c1);
1909 $LD r8,`2*$BNSZ`(r4)
1911 $LD r11,`2*$BNSZ`(r3)
1917 $ST r9,`2*$BNSZ`(r3)
1919 #mul_add(rp[3],ap[3],w,c1);
1920 $LD r8,`3*$BNSZ`(r4)
1922 $LD r9,`3*$BNSZ`(r3)
1928 $ST r11,`3*$BNSZ`(r3)
1929 addi r3,r3,`4*$BNSZ`
1930 addi r4,r4,`4*$BNSZ`
1931 bdnz- Lppcasm_maw_mainloop
1933 Lppcasm_maw_leftover:
1935 beq Lppcasm_maw_adios
1938 #mul_add(rp[0],ap[0],w,c1);
1950 bdz Lppcasm_maw_adios
1951 #mul_add(rp[1],ap[1],w,c1);
1962 bdz Lppcasm_maw_adios
1963 #mul_add(rp[2],ap[2],w,c1);
1980 $data =~ s/\`([^\`]*)\`/eval $1/gem;