#! /usr/bin/env perl
# Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel of Linaro from bits-n-pieces from other assembly modules.
# Just like aesv8-armx.pl this module supports both AArch32 and
# AArch64 execution modes.
#
# July 2014
#
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information].
#
# November 2017
#
# Use the larger AArch64 register bank to "accommodate" 4x aggregated
# reduction, improving performance by 20-70% depending on processor.
#
# Current performance in cycles per processed byte:
#
#               64-bit PMULL    32-bit PMULL    32-bit NEON(*)
# Apple A7      0.58            0.92            5.62
# Cortex-A53    0.85            1.01            8.39
# Cortex-A57    0.73            1.17            7.61
# Denver        0.51            0.65            6.02
# Mongoose      0.65            1.10            8.06
# Kryo          0.76            1.16            8.00
#
# (*)   presented for reference/comparison purposes;

$flavour = shift;
$output  = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

$Xi="x0";       # argument block
$Htbl="x1";
$inp="x2";
$len="x3";

$inc="x12";

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));
my $_byte = ($flavour =~ /win/ ? "DCB" : ".byte");

$code=<<___;
#include "arm_arch.h"

#if __ARM_MAX_ARCH__>=7
___
$code.=".arch   armv8-a+crypto\n.text\n"        if ($flavour =~ /64/);
$code.=<<___                                    if ($flavour !~ /64/);
.fpu    neon
#ifdef __thumb2__
.syntax        unified
.thumb
# define INST(a,b,c,d) $_byte  c,0xef,a,b
#else
.code  32
# define INST(a,b,c,d) $_byte  a,b,c,0xf2
#endif

.text
___

################################################################################
# void gcm_init_v8(u128 Htable[16],const u64 H[2]);
#
# input:        128-bit H - secret parameter E(K,0^128)
# output:       precomputed table filled with degrees of twisted H;
#               H is twisted to handle the reversed bit order of GHASH;
#               only a few of the 16 slots of Htable[16] are used;
#               the data is opaque to the outside world (which allows the
#               code to be optimized independently);
#
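# Layout note (for reference; established by the stores in the code below):
# Htable[0] holds twisted H, Htable[1] the packed Karatsuba "hi^lo" halves,
# Htable[2] twisted H^2; in 64-bit mode Htable[3..5] additionally hold
# twisted H^3, another packed Karatsuba pair and twisted H^4.
#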
$code.=<<___;
.global gcm_init_v8
.type   gcm_init_v8,%function
.align  4
gcm_init_v8:
        vld1.64         {$t1},[x1]              @ load input H
        vmov.i8         $xC2,#0xe1
        vshl.i64        $xC2,$xC2,#57           @ 0xc2.0
        vext.8          $IN,$t1,$t1,#8
        vshr.u64        $t2,$xC2,#63
        vdup.32         $t1,${t1}[1]
        vext.8          $t0,$t2,$xC2,#8         @ t0=0xc2....01
        vshr.u64        $t2,$IN,#63
        vshr.s32        $t1,$t1,#31             @ broadcast carry bit
        vand            $t2,$t2,$t0
        vshl.i64        $IN,$IN,#1
        vext.8          $t2,$t2,$t2,#8
        vand            $t0,$t0,$t1
        vorr            $IN,$IN,$t2             @ H<<<=1
        veor            $H,$IN,$t0              @ twisted H
        vst1.64         {$H},[x0],#16           @ store Htable[0]

        @ calculate H^2
        vext.8          $t0,$H,$H,#8            @ Karatsuba pre-processing
        vpmull.p64      $Xl,$H,$H
        veor            $t0,$t0,$H
        vpmull2.p64     $Xh,$H,$H
        vpmull.p64      $Xm,$t0,$t0

        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
        veor            $Xm,$Xm,$t2
        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase

        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
        veor            $Xl,$Xm,$t2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $t2,$t2,$Xh
        veor            $H2,$Xl,$t2

        vext.8          $t1,$H2,$H2,#8          @ Karatsuba pre-processing
        veor            $t1,$t1,$H2
        vext.8          $Hhl,$t0,$t1,#8         @ pack Karatsuba pre-processed
        vst1.64         {$Hhl-$H2},[x0],#32     @ store Htable[1..2]
___
if ($flavour =~ /64/) {
my ($t3,$Yl,$Ym,$Yh) = map("q$_",(4..7));

$code.=<<___;
        @ calculate H^3 and H^4
        vpmull.p64      $Xl,$H, $H2
         vpmull.p64     $Yl,$H2,$H2
        vpmull2.p64     $Xh,$H, $H2
         vpmull2.p64    $Yh,$H2,$H2
        vpmull.p64      $Xm,$t0,$t1
         vpmull.p64     $Ym,$t1,$t1

        vext.8          $t0,$Xl,$Xh,#8          @ Karatsuba post-processing
         vext.8         $t1,$Yl,$Yh,#8
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t0
         veor           $t3,$Yl,$Yh
         veor           $Ym,$Ym,$t1
        veor            $Xm,$Xm,$t2
        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase
         veor           $Ym,$Ym,$t3
         vpmull.p64     $t3,$Yl,$xC2

        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
         vmov           $Yh#lo,$Ym#hi
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
         vmov           $Ym#hi,$Yl#lo
        veor            $Xl,$Xm,$t2
         veor           $Yl,$Ym,$t3

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase
         vext.8         $t3,$Yl,$Yl,#8
        vpmull.p64      $Xl,$Xl,$xC2
         vpmull.p64     $Yl,$Yl,$xC2
        veor            $t2,$t2,$Xh
         veor           $t3,$t3,$Yh
        veor            $H, $Xl,$t2             @ H^3
         veor           $H2,$Yl,$t3             @ H^4

        vext.8          $t0,$H, $H,#8           @ Karatsuba pre-processing
         vext.8         $t1,$H2,$H2,#8
        veor            $t0,$t0,$H
         veor           $t1,$t1,$H2
        vext.8          $Hhl,$t0,$t1,#8         @ pack Karatsuba pre-processed
        vst1.64         {$H-$H2},[x0]           @ store Htable[3..5]
___
}
$code.=<<___;
        ret
.size   gcm_init_v8,.-gcm_init_v8
___
################################################################################
# void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
#
# input:        Xi - current hash value;
#               Htable - table precomputed in gcm_init_v8;
# output:       Xi - next hash value;
#
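# Informally, the operation is Xi := (Xi*H) mod P over GF(2^128) with
# P = x^128 + x^7 + x^2 + x + 1; the reversed bit order used by GHASH is
# absorbed by the twisted representation of H prepared in gcm_init_v8.
#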
$code.=<<___;
.global gcm_gmult_v8
.type   gcm_gmult_v8,%function
.align  4
gcm_gmult_v8:
        vld1.64         {$t1},[$Xi]             @ load Xi
        vmov.i8         $xC2,#0xe1
        vld1.64         {$H-$Hhl},[$Htbl]       @ load twisted H, ...
        vshl.u64        $xC2,$xC2,#57
#ifndef __ARMEB__
        vrev64.8        $t1,$t1
#endif
        vext.8          $IN,$t1,$t1,#8

        vpmull.p64      $Xl,$H,$IN              @ H.lo·Xi.lo
        veor            $t1,$t1,$IN             @ Karatsuba pre-processing
        vpmull2.p64     $Xh,$H,$IN              @ H.hi·Xi.hi
        vpmull.p64      $Xm,$Hhl,$t1            @ (H.lo+H.hi)·(Xi.lo+Xi.hi)

        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
        veor            $Xm,$Xm,$t2
        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction

        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
        veor            $Xl,$Xm,$t2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $t2,$t2,$Xh
        veor            $Xl,$Xl,$t2

#ifndef __ARMEB__
        vrev64.8        $Xl,$Xl
#endif
        vext.8          $Xl,$Xl,$Xl,#8
        vst1.64         {$Xl},[$Xi]             @ write out Xi

        ret
.size   gcm_gmult_v8,.-gcm_gmult_v8
___
################################################################################
# void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
#
# input:        Xi - current hash value;
#               Htable - table precomputed in gcm_init_v8;
#               inp - pointer to input data;
#               len - length of input data in bytes, which must be divisible
#               by the block size;
# output:       Xi - next hash value;
#
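# Minimal caller sketch (illustrative only, not part of this module; it merely
# exercises the three prototypes documented in this file):
#
#       u128 Htable[16];
#       u64  Xi[2] = { 0, 0 };
#       gcm_init_v8(Htable, H);                 /* H = E(K, 0^128)           */
#       gcm_ghash_v8(Xi, Htable, inp, len);     /* len divisible by 16       */
#       gcm_gmult_v8(Xi, Htable);               /* Xi := Xi*H, single block  */
#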
$code.=<<___;
.global gcm_ghash_v8
.type   gcm_ghash_v8,%function
.align  4
gcm_ghash_v8:
___
$code.=<<___    if ($flavour =~ /64/);
        cmp             $len,#64
        b.hs            .Lgcm_ghash_v8_4x
___
$code.=<<___            if ($flavour !~ /64/);
        vstmdb          sp!,{d8-d15}            @ 32-bit ABI says so
___
$code.=<<___;
        vld1.64         {$Xl},[$Xi]             @ load [rotated] Xi
                                                @ "[rotated]" means that
                                                @ loaded value would have
                                                @ to be rotated in order to
                                                @ make it appear as in
                                                @ algorithm specification
        subs            $len,$len,#32           @ see if $len is 32 or larger
        mov             $inc,#16                @ $inc is used as post-
                                                @ increment for input pointer;
                                                @ as loop is modulo-scheduled
                                                @ $inc is zeroed just in time
                                                @ to preclude overstepping
                                                @ inp[len], which means that
                                                @ last block[s] are actually
                                                @ loaded twice, but last
                                                @ copy is not processed
        vld1.64         {$H-$Hhl},[$Htbl],#32   @ load twisted H, ..., H^2
        vmov.i8         $xC2,#0xe1
        vld1.64         {$H2},[$Htbl]
        cclr            $inc,eq                 @ is it time to zero $inc?
        vext.8          $Xl,$Xl,$Xl,#8          @ rotate Xi
        vld1.64         {$t0},[$inp],#16        @ load [rotated] I[0]
        vshl.u64        $xC2,$xC2,#57           @ compose 0xc2.0 constant
#ifndef __ARMEB__
        vrev64.8        $t0,$t0
        vrev64.8        $Xl,$Xl
#endif
        vext.8          $IN,$t0,$t0,#8          @ rotate I[0]
        b.lo            .Lodd_tail_v8           @ $len was less than 32
___
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
        #######
        # Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
        #       [(H*Ii+1) + (H*Xi+1)] mod P =
        #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
        #
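        # Or, as a scalar sketch of one 2x iteration (illustrative only;
        # "+" is XOR in GF(2^128)):
        #       acc  = H^2 * (Ii + Xi)          # reuses the previous Xi
        #       acc += H * Ii+1
        #       Xi+2 = acc mod P
        #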
$code.=<<___;
        vld1.64         {$t1},[$inp],$inc       @ load [rotated] I[1]
#ifndef __ARMEB__
        vrev64.8        $t1,$t1
#endif
        vext.8          $In,$t1,$t1,#8
        veor            $IN,$IN,$Xl             @ I[i]^=Xi
        vpmull.p64      $Xln,$H,$In             @ H·Ii+1
        veor            $t1,$t1,$In             @ Karatsuba pre-processing
        vpmull2.p64     $Xhn,$H,$In
        b               .Loop_mod2x_v8

.align  4
.Loop_mod2x_v8:
        vext.8          $t2,$IN,$IN,#8
        subs            $len,$len,#32           @ is there more data?
        vpmull.p64      $Xl,$H2,$IN             @ H^2.lo·Xi.lo
        cclr            $inc,lo                 @ is it time to zero $inc?

         vpmull.p64     $Xmn,$Hhl,$t1
        veor            $t2,$t2,$IN             @ Karatsuba pre-processing
        vpmull2.p64     $Xh,$H2,$IN             @ H^2.hi·Xi.hi
        veor            $Xl,$Xl,$Xln            @ accumulate
        vpmull2.p64     $Xm,$Hhl,$t2            @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
         vld1.64        {$t0},[$inp],$inc       @ load [rotated] I[i+2]

        veor            $Xh,$Xh,$Xhn
         cclr           $inc,eq                 @ is it time to zero $inc?
        veor            $Xm,$Xm,$Xmn

        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
         vld1.64        {$t1},[$inp],$inc       @ load [rotated] I[i+3]
#ifndef __ARMEB__
         vrev64.8       $t0,$t0
#endif
        veor            $Xm,$Xm,$t2
        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction

#ifndef __ARMEB__
         vrev64.8       $t1,$t1
#endif
        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
         vext.8         $In,$t1,$t1,#8
         vext.8         $IN,$t0,$t0,#8
        veor            $Xl,$Xm,$t2
         vpmull.p64     $Xln,$H,$In             @ H·Ii+1
        veor            $IN,$IN,$Xh             @ accumulate $IN early

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $IN,$IN,$t2
         veor           $t1,$t1,$In             @ Karatsuba pre-processing
        veor            $IN,$IN,$Xl
         vpmull2.p64    $Xhn,$H,$In
        b.hs            .Loop_mod2x_v8          @ there were at least 32 more bytes

        veor            $Xh,$Xh,$t2
        vext.8          $IN,$t0,$t0,#8          @ re-construct $IN
        adds            $len,$len,#32           @ re-construct $len
        veor            $Xl,$Xl,$Xh             @ re-construct $Xl
        b.eq            .Ldone_v8               @ is $len zero?
___
}
$code.=<<___;
.Lodd_tail_v8:
        vext.8          $t2,$Xl,$Xl,#8
        veor            $IN,$IN,$Xl             @ inp^=Xi
        veor            $t1,$t0,$t2             @ $t1 is rotated inp^Xi

        vpmull.p64      $Xl,$H,$IN              @ H.lo·Xi.lo
        veor            $t1,$t1,$IN             @ Karatsuba pre-processing
        vpmull2.p64     $Xh,$H,$IN              @ H.hi·Xi.hi
        vpmull.p64      $Xm,$Hhl,$t1            @ (H.lo+H.hi)·(Xi.lo+Xi.hi)

        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
        veor            $Xm,$Xm,$t2
        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction

        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
        veor            $Xl,$Xm,$t2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $t2,$t2,$Xh
        veor            $Xl,$Xl,$t2

.Ldone_v8:
#ifndef __ARMEB__
        vrev64.8        $Xl,$Xl
#endif
        vext.8          $Xl,$Xl,$Xl,#8
        vst1.64         {$Xl},[$Xi]             @ write out Xi

___
$code.=<<___            if ($flavour !~ /64/);
        vldmia          sp!,{d8-d15}            @ 32-bit ABI says so
___
$code.=<<___;
        ret
.size   gcm_ghash_v8,.-gcm_ghash_v8
___

if ($flavour =~ /64/) {                         # 4x subroutine
my ($I0,$j1,$j2,$j3,
    $I1,$I2,$I3,$H3,$H34,$H4,$Yl,$Ym,$Yh) = map("q$_",(4..7,15..23));

$code.=<<___;
.type   gcm_ghash_v8_4x,%function
.align  4
gcm_ghash_v8_4x:
.Lgcm_ghash_v8_4x:
        vld1.64         {$Xl},[$Xi]             @ load [rotated] Xi
        vld1.64         {$H-$H2},[$Htbl],#48    @ load twisted H, ..., H^2
        vmov.i8         $xC2,#0xe1
        vld1.64         {$H3-$H4},[$Htbl]       @ load twisted H^3, ..., H^4
        vshl.u64        $xC2,$xC2,#57           @ compose 0xc2.0 constant

        vld1.64         {$I0-$j3},[$inp],#64
#ifndef __ARMEB__
        vrev64.8        $Xl,$Xl
        vrev64.8        $j1,$j1
        vrev64.8        $j2,$j2
        vrev64.8        $j3,$j3
        vrev64.8        $I0,$I0
#endif
        vext.8          $I3,$j3,$j3,#8
        vext.8          $I2,$j2,$j2,#8
        vext.8          $I1,$j1,$j1,#8

        vpmull.p64      $Yl,$H,$I3              @ H·Ii+3
        veor            $j3,$j3,$I3
        vpmull2.p64     $Yh,$H,$I3
        vpmull.p64      $Ym,$Hhl,$j3

        vpmull.p64      $t0,$H2,$I2             @ H^2·Ii+2
        veor            $j2,$j2,$I2
        vpmull2.p64     $I2,$H2,$I2
        vpmull2.p64     $j2,$Hhl,$j2

        veor            $Yl,$Yl,$t0
        veor            $Yh,$Yh,$I2
        veor            $Ym,$Ym,$j2

        vpmull.p64      $j3,$H3,$I1             @ H^3·Ii+1
        veor            $j1,$j1,$I1
        vpmull2.p64     $I1,$H3,$I1
        vpmull.p64      $j1,$H34,$j1

        veor            $Yl,$Yl,$j3
        veor            $Yh,$Yh,$I1
        veor            $Ym,$Ym,$j1

        subs            $len,$len,#128
        b.lo            .Ltail4x

        b               .Loop4x

.align  4
.Loop4x:
        veor            $t0,$I0,$Xl
         vld1.64        {$I0-$j3},[$inp],#64
        vext.8          $IN,$t0,$t0,#8
#ifndef __ARMEB__
         vrev64.8       $j1,$j1
         vrev64.8       $j2,$j2
         vrev64.8       $j3,$j3
         vrev64.8       $I0,$I0
#endif

        vpmull.p64      $Xl,$H4,$IN             @ H^4·(Xi+Ii)
        veor            $t0,$t0,$IN
        vpmull2.p64     $Xh,$H4,$IN
         vext.8         $I3,$j3,$j3,#8
        vpmull2.p64     $Xm,$H34,$t0

        veor            $Xl,$Xl,$Yl
        veor            $Xh,$Xh,$Yh
         vext.8         $I2,$j2,$j2,#8
        veor            $Xm,$Xm,$Ym
         vext.8         $I1,$j1,$j1,#8

        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
         vpmull.p64     $Yl,$H,$I3              @ H·Ii+3
         veor           $j3,$j3,$I3
        veor            $Xm,$Xm,$t1
         vpmull2.p64    $Yh,$H,$I3
        veor            $Xm,$Xm,$t2
         vpmull.p64     $Ym,$Hhl,$j3

        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
         vpmull.p64     $t0,$H2,$I2             @ H^2·Ii+2
         veor           $j2,$j2,$I2
         vpmull2.p64    $I2,$H2,$I2
        veor            $Xl,$Xm,$t2
         vpmull2.p64    $j2,$Hhl,$j2

         veor           $Yl,$Yl,$t0
         veor           $Yh,$Yh,$I2
         veor           $Ym,$Ym,$j2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
         vpmull.p64     $j3,$H3,$I1             @ H^3·Ii+1
         veor           $j1,$j1,$I1
        veor            $t2,$t2,$Xh
         vpmull2.p64    $I1,$H3,$I1
         vpmull.p64     $j1,$H34,$j1

        veor            $Xl,$Xl,$t2
         veor           $Yl,$Yl,$j3
         veor           $Yh,$Yh,$I1
        vext.8          $Xl,$Xl,$Xl,#8
         veor           $Ym,$Ym,$j1

        subs            $len,$len,#64
        b.hs            .Loop4x

.Ltail4x:
        veor            $t0,$I0,$Xl
        vext.8          $IN,$t0,$t0,#8

        vpmull.p64      $Xl,$H4,$IN             @ H^4·(Xi+Ii)
        veor            $t0,$t0,$IN
        vpmull2.p64     $Xh,$H4,$IN
        vpmull2.p64     $Xm,$H34,$t0

        veor            $Xl,$Xl,$Yl
        veor            $Xh,$Xh,$Yh
        veor            $Xm,$Xm,$Ym

        adds            $len,$len,#64
        b.eq            .Ldone4x

        cmp             $len,#32
        b.lo            .Lone
        b.eq            .Ltwo
.Lthree:
        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
         vld1.64        {$I0-$j2},[$inp]
        veor            $Xm,$Xm,$t2
#ifndef __ARMEB__
         vrev64.8       $j1,$j1
         vrev64.8       $j2,$j2
         vrev64.8       $I0,$I0
#endif

        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
         vext.8         $I2,$j2,$j2,#8
         vext.8         $I1,$j1,$j1,#8
        veor            $Xl,$Xm,$t2

         vpmull.p64     $Yl,$H,$I2              @ H·Ii+2
         veor           $j2,$j2,$I2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $t2,$t2,$Xh
         vpmull2.p64    $Yh,$H,$I2
         vpmull.p64     $Ym,$Hhl,$j2
        veor            $Xl,$Xl,$t2
         vpmull.p64     $j3,$H2,$I1             @ H^2·Ii+1
         veor           $j1,$j1,$I1
        vext.8          $Xl,$Xl,$Xl,#8

         vpmull2.p64    $I1,$H2,$I1
        veor            $t0,$I0,$Xl
         vpmull2.p64    $j1,$Hhl,$j1
        vext.8          $IN,$t0,$t0,#8

         veor           $Yl,$Yl,$j3
         veor           $Yh,$Yh,$I1
         veor           $Ym,$Ym,$j1

        vpmull.p64      $Xl,$H3,$IN             @ H^3·(Xi+Ii)
        veor            $t0,$t0,$IN
        vpmull2.p64     $Xh,$H3,$IN
        vpmull.p64      $Xm,$H34,$t0

        veor            $Xl,$Xl,$Yl
        veor            $Xh,$Xh,$Yh
        veor            $Xm,$Xm,$Ym
        b               .Ldone4x

.align  4
.Ltwo:
        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
         vld1.64        {$I0-$j1},[$inp]
        veor            $Xm,$Xm,$t2
#ifndef __ARMEB__
         vrev64.8       $j1,$j1
         vrev64.8       $I0,$I0
#endif

        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
         vext.8         $I1,$j1,$j1,#8
        veor            $Xl,$Xm,$t2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $t2,$t2,$Xh
        veor            $Xl,$Xl,$t2
        vext.8          $Xl,$Xl,$Xl,#8

         vpmull.p64     $Yl,$H,$I1              @ H·Ii+1
         veor           $j1,$j1,$I1

        veor            $t0,$I0,$Xl
        vext.8          $IN,$t0,$t0,#8

         vpmull2.p64    $Yh,$H,$I1
         vpmull.p64     $Ym,$Hhl,$j1

        vpmull.p64      $Xl,$H2,$IN             @ H^2·(Xi+Ii)
        veor            $t0,$t0,$IN
        vpmull2.p64     $Xh,$H2,$IN
        vpmull2.p64     $Xm,$Hhl,$t0

        veor            $Xl,$Xl,$Yl
        veor            $Xh,$Xh,$Yh
        veor            $Xm,$Xm,$Ym
        b               .Ldone4x

.align  4
.Lone:
        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
         vld1.64        {$I0},[$inp]
        veor            $Xm,$Xm,$t2
#ifndef __ARMEB__
         vrev64.8       $I0,$I0
#endif

        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
        veor            $Xl,$Xm,$t2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $t2,$t2,$Xh
        veor            $Xl,$Xl,$t2
        vext.8          $Xl,$Xl,$Xl,#8

        veor            $t0,$I0,$Xl
        vext.8          $IN,$t0,$t0,#8

        vpmull.p64      $Xl,$H,$IN
        veor            $t0,$t0,$IN
        vpmull2.p64     $Xh,$H,$IN
        vpmull.p64      $Xm,$Hhl,$t0

.Ldone4x:
        vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
        veor            $t2,$Xl,$Xh
        veor            $Xm,$Xm,$t1
        veor            $Xm,$Xm,$t2

        vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
        vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
        vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
        veor            $Xl,$Xm,$t2

        vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
        vpmull.p64      $Xl,$Xl,$xC2
        veor            $t2,$t2,$Xh
        veor            $Xl,$Xl,$t2
        vext.8          $Xl,$Xl,$Xl,#8

#ifndef __ARMEB__
        vrev64.8        $Xl,$Xl
#endif
        vst1.64         {$Xl},[$Xi]             @ write out Xi

        ret
.size   gcm_ghash_v8_4x,.-gcm_ghash_v8_4x
___

}
}

$code.=<<___;
.asciz  "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align  2
#endif
___

if ($flavour =~ /64/) {                 ######## 64-bit code
    sub unvmov {
        my $arg=shift;

        $arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
        sprintf "ins    v%d.d[%d],v%d.d[%d]",$1<8?$1:$1+8,($2 eq "lo")?0:1,
                                             $3<8?$3:$3+8,($4 eq "lo")?0:1;
    }
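    # The rewrites below retarget the shared AArch32-style text to AArch64
    # syntax. For instance (illustrative only, per the rules that follow),
    # "vpmull.p64 q0,q12,q3" becomes "pmull v0.1q,v20.1d,v3.1d" and
    # "vmov q0#lo,q8#hi" becomes "ins v0.d[0],v16.d[1]".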
    foreach(split("\n",$code)) {
        s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o     or
        s/vmov\.i8/movi/o               or      # fix up legacy mnemonics
        s/vmov\s+(.*)/unvmov($1)/geo    or
        s/vext\.8/ext/o                 or
        s/vshr\.s/sshr\.s/o             or
        s/vshr/ushr/o                   or
        s/^(\s+)v/$1/o                  or      # strip off v prefix
        s/\bbx\s+lr\b/ret/o;

        s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;  # old->new registers
        s/@\s/\/\//o;                           # old->new style commentary

        # fix up remaining legacy suffixes
        s/\.[ui]?8(\s)/$1/o;
        s/\.[uis]?32//o and s/\.16b/\.4s/go;
        m/\.p64/o and s/\.16b/\.1q/o;           # 1st pmull argument
        m/l\.p64/o and s/\.16b/\.1d/go;         # 2nd and 3rd pmull arguments
        s/\.[uisp]?64//o and s/\.16b/\.2d/go;
        s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

        print $_,"\n";
    }
} else {                                ######## 32-bit code
    sub unvdup32 {
        my $arg=shift;

        $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
        sprintf "vdup.32        q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
    sub unvpmullp64 {
        my ($mnemonic,$arg)=@_;

        if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
            my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
                                 |(($2&7)<<17)|(($2&8)<<4)
                                 |(($3&7)<<1) |(($3&8)<<2);
            $word |= 0x00010001  if ($mnemonic =~ "2");
            # ARMv7 instructions are always encoded little-endian, hence the
            # byte order below. The correct solution would be the .inst
            # directive, but older assemblers don't implement it:-(
            sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
                        $word&0xff,($word>>8)&0xff,
                        ($word>>16)&0xff,($word>>24)&0xff,
                        $mnemonic,$arg;
        }
    }
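    # For example (illustrative, following the arithmetic above), "pmull
    # q0,q1,q2" is emitted as INST(0x04,0x0e,0xa2,0xf2) and "pmull2 q0,q1,q2"
    # as INST(0x05,0x0e,0xa3,0xf2); the INST macro then orders the bytes as
    # required for ARM or Thumb-2 mode.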

    foreach(split("\n",$code)) {
        s/\b[wx]([0-9]+)\b/r$1/go;              # new->old registers
        s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;   # new->old registers
        s/\/\/\s?/@ /o;                         # new->old style commentary

        # fix up remaining new-style suffixes
        s/\],#[0-9]+/]!/o;

        s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2     $1,#0/o                 or
        s/vdup\.32\s+(.*)/unvdup32($1)/geo                              or
        s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo                or
        s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo       or
        s/^(\s+)b\./$1b/o                                               or
        s/^(\s+)ret/$1bx\tlr/o;

        if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
            print "     it      $2\n";
        }

        print $_,"\n";
    }
}

close STDOUT;                           # enforce flush