#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel <ard.biesheuvel@linaro.org> from bits and pieces of
# other assembly modules. Just like aesv8-armx.pl this module
# supports both AArch32 and AArch64 execution modes.
#
# Current performance in cycles per processed byte:
#
#               PMULL[2]        32-bit NEON(*)
# Apple A7      1.76            5.62
# Cortex-A53    1.45            8.39
# Cortex-A57    2.22            7.61
#
# (*)   presented for reference/comparison purposes.
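#
# For reference, a sketch of the C prototypes these three entry points
# are meant to match (assumed from how OpenSSL's gcm128.c drives its
# assembly backends; consult modes_lcl.h for the authoritative
# declarations):
#
#       void gcm_init_v8(u128 Htable[16], const u64 H[2]);
#       void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
#       void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16],
#                         const u8 *inp, size_t len);
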
$flavour = shift;
open STDOUT,">".shift;
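# First argument picks the flavour ("64" selects AArch64, anything
# else AArch32), second is the output file. The code below is written
# once in ARMv7-style syntax and post-processed into either flavour.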

$Xi="x0";       # argument block
$Htbl="x1";
$inp="x2";
$len="x3";

$inc="x12";

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3,$H,$Hhl)=map("q$_",(8..14));
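# q0-q3 hold the accumulator halves and the current input block;
# q8-q14 carry temporaries, the twisted H and the Karatsuba-folded
# H.lo^H.hi. The flavour-specific passes at the bottom rewrite these
# q names into whatever the target assembler expects.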

$code=<<___;
#include "arm_arch.h"

.text
___
$code.=".arch   armv8-a+crypto\n"       if ($flavour =~ /64/);
$code.=".fpu    neon\n.code     32\n"   if ($flavour !~ /64/);

$code.=<<___;
.global gcm_init_v8
.type   gcm_init_v8,%function
.align  4
gcm_init_v8:
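        @ Derive the "twisted" key: twisted H = H<<1 mod P, with P the
        @ GHASH polynomial in bit-reflected form (the 0xc2....01
        @ constant built below). The top bit of H is broadcast via an
        @ arithmetic shift and used as a mask, so the conditional
        @ reduction needs no branch.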
        vld1.64         {$t1},[x1]              @ load H
        vmov.i8         $t0,#0xe1
        vext.8          $IN,$t1,$t1,#8
        vshl.i64        $t0,$t0,#57
        vshr.u64        $t2,$t0,#63
        vext.8          $t0,$t2,$t0,#8          @ t0=0xc2....01
        vdup.32         $t1,${t1}[1]
        vshr.u64        $t3,$IN,#63
        vshr.s32        $t1,$t1,#31             @ broadcast carry bit
        vand            $t3,$t3,$t0
        vshl.i64        $IN,$IN,#1
        vext.8          $t3,$t3,$t3,#8
        vand            $t0,$t0,$t1
        vorr            $IN,$IN,$t3             @ H<<<=1
        veor            $IN,$IN,$t0             @ twisted H
        vst1.64         {$IN},[x0]

        ret
.size   gcm_init_v8,.-gcm_init_v8

.global gcm_gmult_v8
.type   gcm_gmult_v8,%function
.align  4
gcm_gmult_v8:
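        @ Multiply one 128-bit Xi by the twisted H, i.e. a single GHASH
        @ iteration. Instead of duplicating the multiply/reduce code,
        @ zero $len and $inc so the shared loop's pipelined load is a
        @ harmless reload, and branch into the middle of gcm_ghash_v8.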
        vld1.64         {$t1},[$Xi]             @ load Xi
        vmov.i8         $t3,#0xe1
        vld1.64         {$H},[$Htbl]            @ load twisted H
        vshl.u64        $t3,$t3,#57
#ifndef __ARMEB__
        vrev64.8        $t1,$t1
#endif
        vext.8          $Hhl,$H,$H,#8
        mov             $len,#0
        vext.8          $IN,$t1,$t1,#8
        mov             $inc,#0
        veor            $Hhl,$Hhl,$H            @ Karatsuba pre-processing
        mov             $inp,$Xi
        b               .Lgmult_v8
.size   gcm_gmult_v8,.-gcm_gmult_v8

.global gcm_ghash_v8
.type   gcm_ghash_v8,%function
.align  4
gcm_ghash_v8:
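        @ Fold $len bytes at $inp (assumed a multiple of 16) into the
        @ hash value at $Xi. Each block is loaded with a post-increment
        @ of $inc, and $inc is cleared as soon as the last block is in
        @ flight so the pipelined load never reads past the input.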
        vld1.64         {$Xl},[$Xi]             @ load [rotated] Xi
        subs            $len,$len,#16
        vmov.i8         $t3,#0xe1
        mov             $inc,#16
        vld1.64         {$H},[$Htbl]            @ load twisted H
        cclr            $inc,eq
        vext.8          $Xl,$Xl,$Xl,#8
        vshl.u64        $t3,$t3,#57
        vld1.64         {$t1},[$inp],$inc       @ load [rotated] inp
        vext.8          $Hhl,$H,$H,#8
#ifndef __ARMEB__
        vrev64.8        $Xl,$Xl
        vrev64.8        $t1,$t1
#endif
        veor            $Hhl,$Hhl,$H            @ Karatsuba pre-processing
        vext.8          $IN,$t1,$t1,#8
        b               .Loop_v8

.align  4
.Loop_v8:
        vext.8          $t2,$Xl,$Xl,#8
        veor            $IN,$IN,$Xl             @ inp^=Xi
        veor            $t1,$t1,$t2             @ $t1 is rotated inp^Xi

.Lgmult_v8:
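        @ 128x128->256-bit carry-less multiply via Karatsuba: three
        @ PMULLs instead of four, at the cost of fixing up the middle
        @ term afterwards. $Hhl holds H.lo^H.hi from the pre-processing
        @ above.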
        vpmull.p64      $Xl,$H,$IN              @ H.lo·Xi.lo
        veor            $t1,$t1,$IN             @ Karatsuba pre-processing
        vpmull2.p64     $Xh,$H,$IN              @ H.hi·Xi.hi
        subs            $len,$len,#16
        vpmull.p64      $Xm,$Hhl,$t1            @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
        cclr            $inc,eq

        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
         vld1.64        {$t1},[$inp],$inc       @ load [rotated] inp
        veor            $Xm,$Xm,$t2
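        @ Reduce the 256-bit product modulo the GHASH polynomial in two
        @ phases, each a 64-bit carry-less multiply by the 0xc2....01
        @ constant in $t3 plus a fold, interleaved with loading and
        @ byte-reversing the next input block.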
        vpmull.p64      $t2,$Xl,$t3             @ 1st phase

        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
#ifndef __ARMEB__
         vrev64.8       $t1,$t1
#endif
        veor            $Xl,$Xm,$t2
         vext.8         $IN,$t1,$t1,#8

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase
        vpmull.p64      $Xl,$Xl,$t3
        veor            $t2,$t2,$Xh
        veor            $Xl,$Xl,$t2
        b.hs            .Loop_v8

#ifndef __ARMEB__
        vrev64.8        $Xl,$Xl
#endif
        vext.8          $Xl,$Xl,$Xl,#8
        vst1.64         {$Xl},[$Xi]             @ write out Xi

        ret
.size   gcm_ghash_v8,.-gcm_ghash_v8
___
}
$code.=<<___;
.asciz  "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align  2
___

if ($flavour =~ /64/) {                 ######## 64-bit code
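    # This pass mechanically rewrites the ARMv7-style source for the
    # AArch64 assembler: cclr becomes csel against the zero register,
    # half-register vmov becomes ins, q registers are renamed to v
    # registers with explicit arrangement suffixes, and legacy NEON
    # type suffixes are stripped or translated.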
    sub unvmov {
        my $arg=shift;

        $arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
        sprintf "ins    v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
        s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o     or
        s/vmov\.i8/movi/o               or      # fix up legacy mnemonics
        s/vmov\s+(.*)/unvmov($1)/geo    or
        s/vext\.8/ext/o                 or
        s/vshr\.s/sshr\.s/o             or
        s/vshr/ushr/o                   or
        s/^(\s+)v/$1/o                  or      # strip off v prefix
        s/\bbx\s+lr\b/ret/o;

        s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;  # old->new registers
        s/@\s/\/\//o;                           # old->new style commentary

        # fix up remaining legacy suffixes
        s/\.[ui]?8(\s)/$1/o;
        s/\.[uis]?32//o and s/\.16b/\.4s/go;
        m/\.p64/o and s/\.16b/\.1q/o;           # 1st pmull argument
        m/l\.p64/o and s/\.16b/\.1d/go;         # 2nd and 3rd pmull arguments
        s/\.[uisp]?64//o and s/\.16b/\.2d/go;
        s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

        print $_,"\n";
    }
} else {                                ######## 32-bit code
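    # The 32-bit pass maps the handful of new-style spellings back to
    # ARMv7 ones. PMULL has no ARMv7 assembler syntax at all, so
    # unvpmullp64() below hand-encodes it as raw opcode bytes.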
    sub unvdup32 {
        my $arg=shift;

        $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
        sprintf "vdup.32        q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
    sub unvpmullp64 {
        my ($mnemonic,$arg)=@_;

        if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
            my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
                                 |(($2&7)<<17)|(($2&8)<<4)
                                 |(($3&7)<<1) |(($3&8)<<2);
            $word |= 0x00010001  if ($mnemonic =~ "2");
            # ARMv7 instructions are always encoded little-endian, so
            # the opcode can be emitted as raw bytes. The correct
            # solution would be the .inst directive, but older
            # assemblers don't implement it:-(
            sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
                        $word&0xff,($word>>8)&0xff,
                        ($word>>16)&0xff,($word>>24)&0xff,
                        $mnemonic,$arg;
        }
    }

    foreach(split("\n",$code)) {
        s/\b[wx]([0-9]+)\b/r$1/go;              # new->old registers
        s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;   # new->old registers
        s/\/\/\s?/@ /o;                         # new->old style commentary

        # fix up remaining new-style suffixes
        s/\],#[0-9]+/]!/o;

        s/cclr\s+([^,]+),\s*([a-z]+)/mov$2      $1,#0/o                 or
        s/vdup\.32\s+(.*)/unvdup32($1)/geo                              or
        s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo                or
        s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo       or
        s/^(\s+)b\./$1b/o                                               or
        s/^(\s+)ret/$1bx\tlr/o;

        print $_,"\n";
    }
}

close STDOUT; # enforce flush