3 # Specific mode implementations for SPARC T4 modules.
# Register aliases: the six subroutine arguments arrive in SPARC input
# registers %i0..%i5; scratch values for unaligned input/output/ivec
# handling live in local registers %l1..%l7.
5 my ($inp,$out,$len,$key,$ivec,$enc)=map("%i$_",(0..5));
6 my ($ileft,$iright,$ooff,$omask,$ivoff)=map("%l$_",(1..7));
# Emit (append to $::code) the SPARC T4 CBC-encrypt entry point
# ${alg}${bits}_t4_cbc_encrypt.  The generated routine loads round keys
# via _${alg}${bits}_load_enckey, then runs a one-block-at-a-time loop
# that XORs the ivec into each plaintext block (fxor %f12,%f0,%f0) and
# calls _${alg}${bits}_encrypt_1x.  Unaligned input is re-assembled with
# sllx/srlx shifts; unaligned output and (when !$::evp) an unaligned
# ivec buffer are written with faligndata plus stda partial stores to
# ASI 0xc0.  $::evp selects between EVP-style and legacy ivec handling.
# NOTE(review): some original source lines are elided in this excerpt;
# the code below is left byte-identical.
8 sub alg_cbc_encrypt_implement {
12 .globl ${alg}${bits}_t4_cbc_encrypt
14 ${alg}${bits}_t4_cbc_encrypt:
15 save %sp, -$::frame, %sp
17 $::code.=<<___ if (!$::evp);
18 andcc $ivec, 7, $ivoff
19 alignaddr $ivec, %g0, $ivec
21 ldd [$ivec + 0], %f0 ! load ivec
25 faligndata %f0, %f2, %f0
26 faligndata %f2, %f4, %f2
29 $::code.=<<___ if ($::evp);
36 call _${alg}${bits}_load_enckey
43 sub $iright, $ileft, $iright
45 alignaddrl $out, %g0, $out
46 srl $omask, $ooff, $omask
48 .L${bits}_cbc_enc_loop:
55 srlx %o1, $iright, %g1
58 srlx %o2, $iright, %o2
61 xor %g4, %o0, %o0 ! ^= rk[0]
66 fxor %f12, %f0, %f0 ! ^= ivec
68 call _${alg}${bits}_encrypt_1x
76 brnz,pt $len, .L${bits}_cbc_enc_loop
79 $::code.=<<___ if ($::evp);
85 $::code.=<<___ if (!$::evp);
89 std %f0, [$ivec + 0] ! write out ivec
97 2: ldxa [$inp]0x82, %o0 ! avoid read-after-write hazard
98 ! and ~3x deterioration
100 faligndata %f0, %f0, %f4 ! handle unaligned output
101 faligndata %f0, %f2, %f6
102 faligndata %f2, %f2, %f8
104 stda %f4, [$out + $omask]0xc0 ! partial store
107 orn %g0, $omask, $omask
108 stda %f8, [$out + $omask]0xc0 ! partial store
110 brnz,pt $len, .L${bits}_cbc_enc_loop+4
111 orn %g0, $omask, $omask
113 $::code.=<<___ if ($::evp);
119 $::code.=<<___ if (!$::evp);
123 std %f0, [$ivec + 0] ! write out ivec
129 3: alignaddrl $ivec, $ivoff, %g0 ! handle unaligned ivec
131 srl $omask, $ivoff, $omask
132 faligndata %f0, %f0, %f4
133 faligndata %f0, %f2, %f6
134 faligndata %f2, %f2, %f8
135 stda %f4, [$ivec + $omask]0xc0
138 orn %g0, $omask, $omask
139 stda %f8, [$ivec + $omask]0xc0
144 .type ${alg}${bits}_t4_cbc_encrypt,#function
145 .size ${alg}${bits}_t4_cbc_encrypt,.-${alg}${bits}_t4_cbc_encrypt
# Emit (append to $::code) the SPARC T4 CBC-decrypt entry point
# ${alg}${bits}_t4_cbc_decrypt.  Round keys are loaded via
# _${alg}${bits}_load_deckey; `andcc $len, 1` checks whether the block
# count is even and branches to a two-blocks-per-iteration loop
# (.L..._cbc_dec_loop2x calling _..._decrypt_2x) or falls into the 1x
# loop (_..._decrypt_1x).  The ivec is kept in %f12/%f14 and XORed into
# the decrypted block (fxor %f12,%f0,%f0).  Unaligned output uses
# faligndata + stda partial stores (ASI 0xc0); an unaligned ivec buffer
# is handled at .L..._cbc_dec_unaligned_ivec (non-EVP path only).
# NOTE(review): some original source lines are elided in this excerpt;
# the code below is left byte-identical.
149 sub alg_cbc_decrypt_implement {
150 my ($alg,$bits) = @_;
153 .globl ${alg}${bits}_t4_cbc_decrypt
155 ${alg}${bits}_t4_cbc_decrypt:
156 save %sp, -$::frame, %sp
158 $::code.=<<___ if (!$::evp);
159 andcc $ivec, 7, $ivoff
160 alignaddr $ivec, %g0, $ivec
162 ldd [$ivec + 0], %f12 ! load ivec
164 ldd [$ivec + 8], %f14
165 ldd [$ivec + 16], %f0
166 faligndata %f12, %f14, %f12
167 faligndata %f14, %f0, %f14
170 $::code.=<<___ if ($::evp);
171 ld [$ivec + 0], %f12 ! load ivec
174 ld [$ivec + 12], %f15
177 call _${alg}${bits}_load_deckey
179 andcc $len, 1, %g0 ! is number of blocks even?
182 sll $ileft, 3, $ileft
185 sub $iright, $ileft, $iright
187 alignaddrl $out, %g0, $out
188 bz %icc, .L${bits}_cbc_dec_loop2x
189 srl $omask, $ooff, $omask
190 .L${bits}_cbc_dec_loop:
196 sllx %o0, $ileft, %o0
197 srlx %o1, $iright, %g1
198 sllx %o1, $ileft, %o1
200 srlx %o2, $iright, %o2
203 xor %g4, %o0, %o2 ! ^= rk[0]
208 call _${alg}${bits}_decrypt_1x
211 fxor %f12, %f0, %f0 ! ^= ivec
221 brnz,pt $len, .L${bits}_cbc_dec_loop2x
224 $::code.=<<___ if ($::evp);
228 st %f15, [$ivec + 12]
230 $::code.=<<___ if (!$::evp);
231 brnz,pn $ivoff, .L${bits}_cbc_dec_unaligned_ivec
234 std %f12, [$ivec + 0] ! write out ivec
235 std %f14, [$ivec + 8]
242 2: ldxa [$inp]0x82, %o0 ! avoid read-after-write hazard
243 ! and ~3x deterioration
245 faligndata %f0, %f0, %f4 ! handle unaligned output
246 faligndata %f0, %f2, %f6
247 faligndata %f2, %f2, %f8
249 stda %f4, [$out + $omask]0xc0 ! partial store
252 orn %g0, $omask, $omask
253 stda %f8, [$out + $omask]0xc0 ! partial store
255 brnz,pt $len, .L${bits}_cbc_dec_loop2x+4
256 orn %g0, $omask, $omask
258 $::code.=<<___ if ($::evp);
262 st %f15, [$ivec + 12]
264 $::code.=<<___ if (!$::evp);
265 brnz,pn $ivoff, .L${bits}_cbc_dec_unaligned_ivec
268 std %f12, [$ivec + 0] ! write out ivec
269 std %f14, [$ivec + 8]
275 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
277 .L${bits}_cbc_dec_loop2x:
285 sllx %o0, $ileft, %o0
286 srlx %o1, $iright, %g1
288 sllx %o1, $ileft, %o1
289 srlx %o2, $iright, %g1
291 sllx %o2, $ileft, %o2
292 srlx %o3, $iright, %g1
294 sllx %o3, $ileft, %o3
295 srlx %o4, $iright, %o4
298 xor %g4, %o0, %o4 ! ^= rk[0]
307 call _${alg}${bits}_decrypt_2x
312 fxor %f12, %f0, %f0 ! ^= ivec
326 brnz,pt $len, .L${bits}_cbc_dec_loop2x
329 $::code.=<<___ if ($::evp);
333 st %f15, [$ivec + 12]
335 $::code.=<<___ if (!$::evp);
336 brnz,pn $ivoff, .L${bits}_cbc_dec_unaligned_ivec
339 std %f12, [$ivec + 0] ! write out ivec
340 std %f14, [$ivec + 8]
347 2: ldxa [$inp]0x82, %o0 ! avoid read-after-write hazard
348 ! and ~3x deterioration
350 faligndata %f0, %f0, %f8 ! handle unaligned output
351 faligndata %f0, %f2, %f0
352 faligndata %f2, %f4, %f2
353 faligndata %f4, %f6, %f4
354 faligndata %f6, %f6, %f6
355 stda %f8, [$out + $omask]0xc0 ! partial store
360 orn %g0, $omask, $omask
361 stda %f6, [$out + $omask]0xc0 ! partial store
363 brnz,pt $len, .L${bits}_cbc_dec_loop2x+4
364 orn %g0, $omask, $omask
366 $::code.=<<___ if ($::evp);
370 st %f15, [$ivec + 12]
372 $::code.=<<___ if (!$::evp);
373 brnz,pn $ivoff, .L${bits}_cbc_dec_unaligned_ivec
376 std %f12, [$ivec + 0] ! write out ivec
377 std %f14, [$ivec + 8]
382 .L${bits}_cbc_dec_unaligned_ivec:
383 alignaddrl $ivec, $ivoff, %g0 ! handle unaligned ivec
385 srl $omask, $ivoff, $omask
386 faligndata %f12, %f12, %f0
387 faligndata %f12, %f14, %f2
388 faligndata %f14, %f14, %f4
389 stda %f0, [$ivec + $omask]0xc0
392 orn %g0, $omask, $omask
393 stda %f4, [$ivec + $omask]0xc0
398 .type ${alg}${bits}_t4_cbc_decrypt,#function
399 .size ${alg}${bits}_t4_cbc_decrypt,.-${alg}${bits}_t4_cbc_decrypt
# Emit (append to $::code) the SPARC T4 CTR32 entry point
# ${alg}${bits}_t4_ctr32_encrypt.  The 32-bit counter is loaded from
# the ivec (`ld [$ivec + 0], %l4`) and kept clamped to 32 bits with
# `srl %l7, 0, %l7` (clruw).  As with CBC decrypt, `andcc $len, 1`
# selects a 1x or 2x loop; the first cipher round is issued inline per
# algorithm ($alg eq "aes": aes_eround01/aes_eround23; $alg eq "cmll":
# camellia_f) before jumping into the shared _..._encrypt_1x+8 /
# _..._encrypt_2x+16 tails, after which the keystream is XORed with the
# input (fxor ... ! ^= inp).  Unaligned output is handled via
# faligndata + stda partial stores (ASI 0xc0).
# NOTE(review): some original source lines are elided in this excerpt;
# the code below is left byte-identical.
403 sub alg_ctr32_implement {
404 my ($alg,$bits) = @_;
407 .globl ${alg}${bits}_t4_ctr32_encrypt
409 ${alg}${bits}_t4_ctr32_encrypt:
410 save %sp, -$::frame, %sp
412 call _${alg}${bits}_load_enckey
415 ld [$ivec + 0], %l4 ! counter
423 xor %o5, %g4, %g4 ! ^= rk[0]
425 movxtod %g4, %f14 ! most significant 64 bits
427 andcc $len, 1, %g0 ! is number of blocks even?
430 sll $ileft, 3, $ileft
433 sub $iright, $ileft, $iright
435 alignaddrl $out, %g0, $out
436 bz %icc, .L${bits}_ctr32_loop2x
437 srl $omask, $ooff, $omask
438 .L${bits}_ctr32_loop:
444 sllx %o0, $ileft, %o0
445 srlx %o1, $iright, %g1
446 sllx %o1, $ileft, %o1
448 srlx %o2, $iright, %o2
451 xor %g5, %l7, %g1 ! ^= rk[0]
454 srl %l7, 0, %l7 ! clruw
456 $::code.=<<___ if ($alg eq "aes");
457 aes_eround01 %f16, %f14, %f2, %f4
458 aes_eround23 %f18, %f14, %f2, %f2
460 $::code.=<<___ if ($alg eq "cmll");
461 camellia_f %f16, %f2, %f14, %f2
462 camellia_f %f18, %f14, %f2, %f0
465 call _${alg}${bits}_encrypt_1x+8
470 fxor %f10, %f0, %f0 ! ^= inp
478 brnz,pt $len, .L${bits}_ctr32_loop2x
485 2: ldxa [$inp]0x82, %o0 ! avoid read-after-write hazard
486 ! and ~3x deterioration
488 faligndata %f0, %f0, %f4 ! handle unaligned output
489 faligndata %f0, %f2, %f6
490 faligndata %f2, %f2, %f8
491 stda %f4, [$out + $omask]0xc0 ! partial store
494 orn %g0, $omask, $omask
495 stda %f8, [$out + $omask]0xc0 ! partial store
497 brnz,pt $len, .L${bits}_ctr32_loop2x+4
498 orn %g0, $omask, $omask
503 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
505 .L${bits}_ctr32_loop2x:
513 sllx %o0, $ileft, %o0
514 srlx %o1, $iright, %g1
516 sllx %o1, $ileft, %o1
517 srlx %o2, $iright, %g1
519 sllx %o2, $ileft, %o2
520 srlx %o3, $iright, %g1
522 sllx %o3, $ileft, %o3
523 srlx %o4, $iright, %o4
526 xor %g5, %l7, %g1 ! ^= rk[0]
529 srl %l7, 0, %l7 ! clruw
533 srl %l7, 0, %l7 ! clruw
536 $::code.=<<___ if ($alg eq "aes");
537 aes_eround01 %f16, %f14, %f2, %f8
538 aes_eround23 %f18, %f14, %f2, %f2
539 aes_eround01 %f16, %f14, %f6, %f10
540 aes_eround23 %f18, %f14, %f6, %f6
542 $::code.=<<___ if ($alg eq "cmll");
543 camellia_f %f16, %f2, %f14, %f2
544 camellia_f %f16, %f6, %f14, %f6
545 camellia_f %f18, %f14, %f2, %f0
546 camellia_f %f18, %f14, %f6, %f4
549 call _${alg}${bits}_encrypt_2x+16
555 fxor %f8, %f0, %f0 ! ^= inp
568 brnz,pt $len, .L${bits}_ctr32_loop2x
575 2: ldxa [$inp]0x82, %o0 ! avoid read-after-write hazard
576 ! and ~3x deterioration
578 faligndata %f0, %f0, %f8 ! handle unaligned output
579 faligndata %f0, %f2, %f0
580 faligndata %f2, %f4, %f2
581 faligndata %f4, %f6, %f4
582 faligndata %f6, %f6, %f6
584 stda %f8, [$out + $omask]0xc0 ! partial store
589 orn %g0, $omask, $omask
590 stda %f6, [$out + $omask]0xc0 ! partial store
592 brnz,pt $len, .L${bits}_ctr32_loop2x+4
593 orn %g0, $omask, $omask
597 .type ${alg}${bits}_t4_ctr32_encrypt,#function
598 .size ${alg}${bits}_t4_ctr32_encrypt,.-${alg}${bits}_t4_ctr32_encrypt
602 # Purpose of these subroutines is to explicitly encode VIS instructions,
603 # so that one can compile the module without having to specify VIS
604 # extensions on compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
605 # The idea is to reserve the option of producing a "universal" binary,
606 # letting the programmer detect at run-time whether the CPU is VIS-capable.
# Decoder for 3-operand VIS instructions (e.g. faligndata): when the
# mnemonic is in %visopf and all operands parse as even-numbered %f
# double registers, emit the instruction as a raw ".word" encoding
# (format 0x81b00000 | rd<<25 | rs1<<14 | opf<<5 | rs2) so the
# assembler needs no VIS support; otherwise return the textual form
# ($ref) unchanged.  NOTE(review): the enclosing `sub` header and some
# body lines are elided in this excerpt — presumably this is the
# `unvis`-style decoder; confirm against the full file.
608 my ($mnemonic,$rs1,$rs2,$rd)=@_;
610 my %visopf = ( "faligndata" => 0x048,
615 $ref = "$mnemonic\t$rs1,$rs2,$rd";
617 if ($opf=$visopf{$mnemonic}) {
618 foreach ($rs1,$rs2,$rd) {
619 return $ref if (!/%f([0-9]{1,2})/);
622 return $ref if ($1&1);
623 # re-encode for upper double register addressing
628 return sprintf ".word\t0x%08x !%s",
629 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
# Decoder for alignaddr/alignaddrl on integer registers: %bias maps the
# register-window letter (g/o/l/i) to its numeric base, the trailing
# "l" in the mnemonic selects opf 0x01a (alignaddrl) instead of 0x018
# (alignaddr), and any operand that is not a %g/%o/%l/%i register makes
# the sub fall back to the textual form ($ref).  NOTE(review): the
# enclosing `sub` header is elided in this excerpt.
636 my ($mnemonic,$rs1,$rs2,$rd)=@_;
637 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
638 my $ref = "$mnemonic\t$rs1,$rs2,$rd";
639 my $opf = $mnemonic =~ /l$/ ? 0x01a :0x18;
641 foreach ($rs1,$rs2,$rd) {
642 if (/%([goli])([0-7])/) { $_=$bias{$1}+$2; }
643 else { return $ref; }
645 return sprintf ".word\t0x%08x !%s",
646 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
# Decoder for 4-operand AES instructions (aes_eround*/aes_dround*/
# aes_kexpand1): looks the mnemonic up in %aesopf; rs3 is folded into a
# 5-bit double-register number via ($1|$1>>5)&31; if any of rs1/rs2/rd
# is not an even %f register the textual form ($ref) is returned,
# otherwise the instruction is emitted as a raw ".word" with op3=0x19.
# NOTE(review): some body lines (including part of %aesopf and the
# loop's closing braces) are elided in this excerpt.
650 sub unaes_round { # 4-argument instructions
651 my ($mnemonic,$rs1,$rs2,$rs3,$rd)=@_;
653 my %aesopf = ( "aes_eround01" => 0,
657 "aes_eround01_l"=> 4,
658 "aes_eround23_l"=> 5,
659 "aes_dround01_l"=> 6,
660 "aes_dround23_l"=> 7,
661 "aes_kexpand1" => 8 );
663 $ref = "$mnemonic\t$rs1,$rs2,$rs3,$rd";
665 if (defined($opf=$aesopf{$mnemonic})) {
666 $rs3 = ($rs3 =~ /%f([0-6]*[02468])/) ? (($1|$1>>5)&31) : $rs3;
667 foreach ($rs1,$rs2,$rd) {
668 return $ref if (!/%f([0-9]{1,2})/);
671 return $ref if ($1&1);
672 # re-encode for upper double register addressing
677 return sprintf ".word\t0x%08x !%s",
678 2<<30|$rd<<25|0x19<<19|$rs1<<14|$rs3<<9|$opf<<5|$rs2,
# Decoder for 3-operand AES key-expansion instructions (aes_kexpand0 /
# aes_kexpand2): same even-%f-register validation as unaes_round, but
# encoded with op3=0x36 and the opf values 0x130/0x131 from %aesopf;
# non-matching operands fall back to the textual form ($ref).
685 sub unaes_kexpand { # 3-argument instructions
686 my ($mnemonic,$rs1,$rs2,$rd)=@_;
688 my %aesopf = ( "aes_kexpand0" => 0x130,
689 "aes_kexpand2" => 0x131 );
691 $ref = "$mnemonic\t$rs1,$rs2,$rd";
693 if (defined($opf=$aesopf{$mnemonic})) {
694 foreach ($rs1,$rs2,$rd) {
695 return $ref if (!/%f([0-9]{1,2})/);
698 return $ref if ($1&1);
699 # re-encode for upper double register addressing
704 return sprintf ".word\t0x%08x !%s",
705 2<<30|$rd<<25|0x36<<19|$rs1<<14|$opf<<5|$rs2,
# Decoder for the 4-operand camellia_f instruction: mirrors unaes_round
# (rs3 folded via ($1|$1>>5)&31, even-%f validation on rs1/rs2/rd) but
# with a fixed opf of 0xc in the op3=0x19 encoding, since there is only
# one mnemonic and hence no lookup table.
712 sub uncamellia_f { # 4-argument instructions
713 my ($mnemonic,$rs1,$rs2,$rs3,$rd)=@_;
716 $ref = "$mnemonic\t$rs1,$rs2,$rs3,$rd";
719 $rs3 = ($rs3 =~ /%f([0-6]*[02468])/) ? (($1|$1>>5)&31) : $rs3;
720 foreach ($rs1,$rs2,$rd) {
721 return $ref if (!/%f([0-9]{1,2})/);
724 return $ref if ($1&1);
725 # re-encode for upper double register addressing
730 return sprintf ".word\t0x%08x !%s",
731 2<<30|$rd<<25|0x19<<19|$rs1<<14|$rs3<<9|0xc<<5|$rs2,
# Decoder for 3-operand Camellia instructions (camellia_fl /
# camellia_fli): analogous to unaes_kexpand, with opf values 0x13c /
# 0x13d from %cmllopf under the op3=0x36 encoding; operands that are
# not even %f registers return the textual form ($ref).
738 sub uncamellia3 { # 3-argument instructions
739 my ($mnemonic,$rs1,$rs2,$rd)=@_;
741 my %cmllopf = ( "camellia_fl" => 0x13c,
742 "camellia_fli" => 0x13d );
744 $ref = "$mnemonic\t$rs1,$rs2,$rd";
746 if (defined($opf=$cmllopf{$mnemonic})) {
747 foreach ($rs1,$rs2,$rd) {
748 return $ref if (!/%f([0-9]{1,2})/);
751 return $ref if ($1&1);
752 # re-encode for upper double register addressing
757 return sprintf ".word\t0x%08x !%s",
758 2<<30|$rd<<25|0x36<<19|$opf<<5|$rs,
# Decoder for 2-operand move instructions between integer and FP
# registers (movdtox ... movwtos): %bias maps the register-group letter
# to its numeric base (with "f" biased by 0 so %f registers pass
# through), the opf comes from %movxopf, and the odd-numbered-register
# check uses capture $2 (the digits) rather than $1 (the group letter).
# Operands that do not parse return the textual form ($ref).
765 sub unmovxtox { # 2-argument instructions
766 my ($mnemonic,$rs,$rd)=@_;
767 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24, "f" => 0 );
769 my %movxopf = ( "movdtox" => 0x110,
773 "movwtos" => 0x119 );
775 $ref = "$mnemonic\t$rs,$rd";
777 if (defined($opf=$movxopf{$mnemonic})) {
779 return $ref if (!/%([fgoli])([0-9]{1,2})/);
782 return $ref if ($2&1);
783 # re-encode for upper double register addressing
788 return sprintf ".word\t0x%08x !%s",
789 2<<30|$rd<<25|0x36<<19|$opf<<5|$rs,
# Post-processing pass: walk $::code line by line, expanding `...`
# spans via eval, normalizing two-operand fp-convert mnemonics
# (f...2s/f...2d) to three-operand form with %f0, and rewriting every
# AES/Camellia/VIS/alignaddr/mov[xw]to[ds] mnemonic through the
# decoder subs above so the output assembles without crypto/VIS
# assembler support.  NOTE(review): this loop continues past the end of
# this excerpt; the code below is left byte-identical.
797 foreach (split("\n",$::code)) {
798 s/\`([^\`]*)\`/eval $1/ge;
800 s/\b(f[a-z]+2[sd]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})\s*$/$1\t%f0,$2,$3/g;
802 s/\b(aes_[edk][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
803 &unaes_round($1,$2,$3,$4,$5)
805 s/\b(aes_kexpand[02])\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
806 &unaes_kexpand($1,$2,$3,$4)
808 s/\b(camellia_f)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
809 &uncamellia_f($1,$2,$3,$4,$5)
811 s/\b(camellia_[^s]+)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
812 &uncamellia3($1,$2,$3,$4)
814 s/\b(mov[ds]to\w+)\s+(%f[0-9]{1,2}),\s*(%[goli][0-7])/
817 s/\b(mov[xw]to[ds])\s+(%[goli][0-7]),\s*(%f[0-9]{1,2})/
820 s/\b(f[^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
823 s/\b(alignaddr[l]*)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
824 &unalignaddr($1,$2,$3,$4)