#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
# other assembly modules. Just like aesv8-armx.pl this module
# supports both AArch32 and AArch64 execution modes.
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Apple A7	1.76		5.62
# Cortex-A53	1.45		8.39
# Cortex-A57	2.22		7.61
#
# (*)	presented for reference/comparison purposes;

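# For reference, the C-level callers (see crypto/modes/gcm128.c) are
# expected to declare these entry points roughly as follows, with u64,
# u128 and u8 being the gcm128.c-internal types:
#
#	void gcm_init_v8(u128 Htable[],const u64 H[2]);
#	void gcm_gmult_v8(u64 Xi[2],const u128 Htable[]);
#	void gcm_ghash_v8(u64 Xi[2],const u128 Htable[],
#			  const u8 *inp,size_t len);
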
$flavour = shift;
$output  = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

$Xi="x0";	# argument block
$Htbl="x1";
$inp="x2";
$len="x3";

$inc="x12";

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3,$H,$Hhl)=map("q$_",(8..14));
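# NEON registers are referred to in 32-bit (q) syntax throughout; for
# the 64-bit flavour the post-processing loop at the bottom renumbers
# q8-q14 to v16-v22, keeping the module clear of the callee-saved
# v8-v15 range of the AArch64 procedure call standard.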

$code=<<___;
#include "arm_arch.h"

.text
___
$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=".fpu	neon\n.code	32\n"	if ($flavour !~ /64/);

$code.=<<___;
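@ gcm_init_v8 pre-computes the "twisted" hash key used by the
@ multiplication routines below: the raw key H produced by the block
@ cipher is loaded from [x1], shifted left by one bit with the carry
@ folded back in through the 0xc2....01 constant, and the result is
@ stored at [x0].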
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64		{$t1},[x1]		@ load H
	vmov.i8		$t0,#0xe1
	vext.8		$IN,$t1,$t1,#8
	vshl.i64	$t0,$t0,#57
	vshr.u64	$t2,$t0,#63
	vext.8		$t0,$t2,$t0,#8		@ t0=0xc2....01
	vdup.32		$t1,${t1}[1]
	vshr.u64	$t3,$IN,#63
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vand		$t3,$t3,$t0
	vshl.i64	$IN,$IN,#1
	vext.8		$t3,$t3,$t3,#8
	vand		$t0,$t0,$t1
	vorr		$IN,$IN,$t3		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
	vst1.64		{$IN},[x0]

	ret
.size	gcm_init_v8,.-gcm_init_v8

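@ gcm_gmult_v8 performs a single multiplication: the 128-bit Xi at
@ [x0] is multiplied by the twisted H at [x1] and written back. It
@ presets the residual length and post-increment to zero and falls
@ into the main loop below, which then exits after one pass.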
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64		{$t1},[$Xi]		@ load Xi
	vmov.i8		$t3,#0xe1
	vld1.64		{$H},[$Htbl]		@ load twisted H
	vshl.u64	$t3,$t3,#57
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$Hhl,$H,$H,#8
	mov		$len,#0
	vext.8		$IN,$t1,$t1,#8
	mov		$inc,#0
	veor		$Hhl,$Hhl,$H		@ Karatsuba pre-processing
	mov		$inp,$Xi
	b		.Lgmult_v8
.size	gcm_gmult_v8,.-gcm_gmult_v8

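@ gcm_ghash_v8 hashes a whole buffer: for each 16-byte block at [x2]
@ (x3 holds the byte count, assumed to be a multiple of 16) it computes
@ Xi = (Xi xor block)*H, with Xi kept at [x0] and the twisted H at [x1].
@ The load of the next block is overlapped with the multiplication, and
@ the post-increment in x12 is zeroed ahead of the last iteration so
@ that the trailing dummy load never reads past the input.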
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
	subs		$len,$len,#16
	vmov.i8		$t3,#0xe1
	mov		$inc,#16
	vld1.64		{$H},[$Htbl]		@ load twisted H
	cclr		$inc,eq
	vext.8		$Xl,$Xl,$Xl,#8
	vshl.u64	$t3,$t3,#57
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] inp
	vext.8		$Hhl,$H,$H,#8
#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
	vrev64.8	$t1,$t1
#endif
	veor		$Hhl,$Hhl,$H		@ Karatsuba pre-processing
	vext.8		$IN,$t1,$t1,#8
	b		.Loop_v8

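@ Each iteration below folds one block into Xi: the 128x128-bit
@ carry-less multiplication is done Karatsuba-style with three PMULL
@ operations, and the 256-bit product is reduced modulo the GHASH
@ polynomial in two phases, each multiplying the current low half by
@ the pre-loaded reduction constant.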
.align	4
.Loop_v8:
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t1,$t2		@ $t1 is rotated inp^Xi

.Lgmult_v8:
	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	subs		$len,$len,#16
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
	cclr		$inc,eq

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	 vld1.64	{$t1},[$inp],$inc	@ load [rotated] inp
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$t3		@ 1st phase

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
#ifndef __ARMEB__
	 vrev64.8	$t1,$t1
#endif
	veor		$Xl,$Xm,$t2
	 vext.8		$IN,$t1,$t1,#8

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$t3
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2
	b.hs		.Loop_v8

#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

	ret
.size	gcm_ghash_v8,.-gcm_ghash_v8
___
}
$code.=<<___;
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

if ($flavour =~ /64/) {			######## 64-bit code
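    # unvmov rewrites the 32-bit-style half-register moves
    # "vmov qD#lo/hi,qS#lo/hi" used above into the AArch64
    # "ins Vd.d[i],Vn.d[j]" form.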
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o		or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vext\.8/ext/o			or
	s/vshr\.s/sshr\.s/o		or
	s/vshr/ushr/o			or
	s/^(\s+)v/$1/o			or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;				# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[ui]?8(\s)/$1/o;
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;		# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;		# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
} else {				######## 32-bit code
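    # In 32-bit syntax vdup.32 takes a d-register scalar operand, so
    # unvdup32 maps the q-register lane reference used above onto the
    # corresponding d register and lane.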
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
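    # vmull.p64 is a Crypto Extension instruction that older 32-bit
    # assemblers do not recognize, so unvpmullp64 emits its encoding as
    # raw bytes; the "pmull2" variant is produced by bumping the source
    # operands to the odd d registers, i.e. the high halves of the
    # q-register sources.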
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
	    # ARMv7 instructions are always encoded little-endian, so the
	    # opcode can be emitted as raw .byte data; the correct solution
	    # would be the .inst directive, but older assemblers don't
	    # implement it:-(
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }

    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\],#[0-9]+/]!/o;

	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo				or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo		or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)b\./$1b/o						or
	s/^(\s+)ret/$1bx\tlr/o;

	print $_,"\n";
    }
}

close STDOUT; # enforce flush