# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
# Initial version was developed in tight cooperation with Ard
# Biesheuvel of Linaro from bits-n-pieces of other assembly modules.
# Just like aesv8-armx.pl this module supports both AArch32 and
# AArch64 execution modes.
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# AArch64 register bank to "accommodate" 4x aggregated reduction and
# improve performance by 20-70% depending on processor.
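#
# (Aggregated reduction amortizes one modular reduction over several block
# multiplications; e.g. in the 2x case Xi+2 = [H^2*(Ii+Xi) + H*Ii+1] mod P,
# the same relation spelled out above the mod2x loop below.)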
# Current performance in cycles per processed byte:
#
#		64-bit PMULL	32-bit PMULL	32-bit NEON(*)
# Apple A7	0.58		0.92		5.62
# Cortex-A53	0.85		1.01		8.39
# Cortex-A57	0.73		1.17		7.61
# Denver	0.51		0.65		6.02
# Mongoose	0.65		1.10		8.06
#
# (*)	presented for reference/comparison purposes;
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";
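# A typical invocation (illustrative only; the flavour string and output
# path are whatever the caller passes, e.g. one of the arm64 perlasm
# flavours):
#
#	perl ghashv8-armx.pl linux64 ghashv8-armx.S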
$Xi="x0";	# argument block
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));
my $_byte = ($flavour =~ /win/ ? "DCB" : ".byte");
#if __ARM_MAX_ARCH__>=7
$code.=".arch armv8-a+crypto\n.text\n"	if ($flavour =~ /64/);
$code.=<<___			if ($flavour !~ /64/);
# define INST(a,b,c,d)	$_byte	c,0xef,a,b
# define INST(a,b,c,d)	$_byte	a,b,c,0xf2
################################################################################
# void gcm_init_v8(u128 Htable[16],const u64 H[2]);
#
# input:	128-bit H - secret parameter E(K,0^128)
# output:	precomputed table filled with powers of twisted H;
#		H is twisted to handle the reversed bit order of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		data is opaque to the outside world (which allows the
#		code to be optimized independently);
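#
# A minimal caller sketch (hedged: the u64/u128 typedefs are illustrative
# stand-ins for whatever the surrounding C code actually defines):
#
#	typedef unsigned long long u64;
#	typedef struct { u64 hi, lo; } u128;
#	void gcm_init_v8(u128 Htable[16], const u64 H[2]);
#
#	u128 Htable[16];
#	u64  H[2];		/* E(K,0^128) as two 64-bit words */
#	gcm_init_v8(Htable, H);	/* fill Htable with powers of twisted H */
#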
.type	gcm_init_v8,%function
$code.=<<___		if ($flavour =~ /64/);
	AARCH64_VALID_CALL_TARGET
	vld1.64		{$t1},[x1]		@ load input H
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$IN,$t1,$t1,#8
	vshr.u64	$t2,$xC2,#63
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vext.8		$t2,$t2,$t2,#8
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
	vst1.64		{$H},[x0],#16		@ store Htable[0]
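	@ (The "twist" premultiplies H by x modulo the GHASH polynomial,
	@ compensating for the bit-reflected representation so that plain
	@ pmull plus the 0xc2-constant reduction below gives the right result.)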
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0
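	@ (Karatsuba: three 64x64 carry-less multiplications instead of four;
	@ the middle term is recovered in post-processing by xoring in the
	@ low and high products.)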
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
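	@ (The two pmull-by-xC2 steps fold the 256-bit product back to
	@ 128 bits modulo the GHASH polynomial x^128+x^7+x^2+x+1, using the
	@ 0xc2 constant composed above.)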
	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0],#32	@ store Htable[1..2]
if ($flavour =~ /64/) {
my ($t3,$Yl,$Ym,$Yh) = map("q$_",(4..7));
my ($H3,$H34k,$H4,$H5,$H56k,$H6,$H7,$H78k,$H8) = map("q$_",(15..23));
	@ calculate H^3 and H^4
	vpmull.p64	$Xl,$H, $H2
	vpmull.p64	$Yl,$H2,$H2
	vpmull2.p64	$Xh,$H, $H2
	vpmull2.p64	$Yh,$H2,$H2
	vpmull.p64	$Xm,$t0,$t1
	vpmull.p64	$Ym,$t1,$t1
	vext.8		$t0,$Xl,$Xh,#8		@ Karatsuba post-processing
	vext.8		$t1,$Yl,$Yh,#8
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase
	vpmull.p64	$t3,$Yl,$xC2
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vext.8		$t3,$Yl,$Yl,#8
	vpmull.p64	$Xl,$Xl,$xC2
	vpmull.p64	$Yl,$Yl,$xC2
	veor		$H3, $Xl,$t2		@ H^3
	veor		$H4,$Yl,$t3		@ H^4
	vext.8		$t0,$H3, $H3,#8		@ Karatsuba pre-processing
	vext.8		$t1,$H4,$H4,#8
	vext.8		$t2,$H2,$H2,#8
	vext.8		$H34k,$t0,$t1,#8	@ pack Karatsuba pre-processed
	vst1.64		{$H3-$H4},[x0],#48	@ store Htable[3..5]
	@ calculate H^5 and H^6
	vpmull.p64	$Xl,$H2, $H3
	vpmull.p64	$Yl,$H3,$H3
	vpmull2.p64	$Xh,$H2, $H3
	vpmull2.p64	$Yh,$H3,$H3
	vpmull.p64	$Xm,$t0,$t2
	vpmull.p64	$Ym,$t0,$t0
	vext.8		$t0,$Xl,$Xh,#8		@ Karatsuba post-processing
	vext.8		$t1,$Yl,$Yh,#8
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase
	vpmull.p64	$t3,$Yl,$xC2
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vext.8		$t3,$Yl,$Yl,#8
	vpmull.p64	$Xl,$Xl,$xC2
	vpmull.p64	$Yl,$Yl,$xC2
	veor		$H5,$Xl,$t2		@ H^5
	veor		$H6,$Yl,$t3		@ H^6
	vext.8		$t0,$H5, $H5,#8		@ Karatsuba pre-processing
	vext.8		$t1,$H6,$H6,#8
	vext.8		$t2,$H2,$H2,#8
	vext.8		$H56k,$t0,$t1,#8	@ pack Karatsuba pre-processed
	vst1.64		{$H5-$H6},[x0],#48	@ store Htable[6..8]
	@ calculate H^7 and H^8
	vpmull.p64	$Xl,$H2,$H5
	vpmull.p64	$Yl,$H2,$H6
	vpmull2.p64	$Xh,$H2,$H5
	vpmull2.p64	$Yh,$H2,$H6
	vpmull.p64	$Xm,$t0,$t2
	vpmull.p64	$Ym,$t1,$t2
	vext.8		$t0,$Xl,$Xh,#8		@ Karatsuba post-processing
	vext.8		$t1,$Yl,$Yh,#8
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase
	vpmull.p64	$t3,$Yl,$xC2
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vext.8		$t3,$Yl,$Yl,#8
	vpmull.p64	$Xl,$Xl,$xC2
	vpmull.p64	$Yl,$Yl,$xC2
	veor		$H7,$Xl,$t2		@ H^7
	veor		$H8,$Yl,$t3		@ H^8
	vext.8		$t0,$H7,$H7,#8		@ Karatsuba pre-processing
	vext.8		$t1,$H8,$H8,#8
	vext.8		$H78k,$t0,$t1,#8	@ pack Karatsuba pre-processed
	vst1.64		{$H7-$H8},[x0]		@ store Htable[9..11]
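	@ (Resulting Htable layout: [0]=H, [2]=H^2, [3]=H^3, [5]=H^4, [6]=H^5,
	@ [8]=H^6, [9]=H^7, [11]=H^8; slots 1,4,7,10 hold the packed Karatsuba
	@ "lo+hi" values for the neighbouring powers.)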
.size	gcm_init_v8,.-gcm_init_v8
################################################################################
# void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
# input:	Xi - current hash value;
#		Htable - table precomputed in gcm_init_v8;
# output:	Xi - next hash value Xi;
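#
# In C terms (illustrative only): one GHASH step with no new data block,
# i.e. Xi is replaced by Xi*H in GF(2^128):
#
#	u64 Xi[2];			/* current hash value */
#	gcm_gmult_v8(Xi, Htable);	/* Xi = Xi * H */
#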
.type	gcm_gmult_v8,%function
$code.=<<___		if ($flavour =~ /64/);
	AARCH64_VALID_CALL_TARGET
	vld1.64		{$t1},[$Xi]		@ load Xi
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
	vext.8		$IN,$t1,$t1,#8
	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
.size	gcm_gmult_v8,.-gcm_gmult_v8
################################################################################
# void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
# input:	table precomputed in gcm_init_v8;
#		current hash value Xi;
#		pointer to input data;
#		length of input data in bytes, which must be divisible
#		by the block size;
# output:	next hash value Xi;
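#
# A hedged usage sketch (buffer names are illustrative only):
#
#	const u8 *inp;		/* whole 16-byte blocks to absorb */
#	size_t    len;		/* multiple of 16 */
#	gcm_ghash_v8(Xi, Htable, inp, len);	/* fold len/16 blocks into Xi */
#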
.type	gcm_ghash_v8,%function
$code.=<<___		if ($flavour =~ /64/);
	AARCH64_VALID_CALL_TARGET
	b.hs	.Lgcm_ghash_v8_4x
$code.=<<___		if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs		$len,$len,#32		@ see if $len is 32 or larger
	mov		$inc,#16		@ $inc is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ $inc is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64		{$H-$Hhl},[$Htbl],#32	@ load twisted H, ..., H^2
	vld1.64		{$H2},[$Htbl]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$Xl,$Xl,$Xl,#8		@ rotate Xi
	vld1.64		{$t0},[$inp],#16	@ load [rotated] I[0]
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
	vext.8		$IN,$t0,$t0,#8		@ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
#	[(H*Ii+1) + (H*Xi+1)] mod P =
#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
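# (In the loop below Xln/Xmn/Xhn carry the H*Ii+1 product while Xl/Xm/Xh
#  compute H^2*(Ii+Xi); the two are xored together before a single reduction.)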
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
	vext.8		$In,$t1,$t1,#8
	veor		$IN,$IN,$Xl		@ I[i]^=Xi
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	vext.8		$t2,$IN,$IN,#8
	subs		$len,$len,#32		@ is there more data?
	vpmull.p64	$Xl,$H2,$IN		@ H^2.lo·Xi.lo
	cclr		$inc,lo			@ is it time to zero $inc?
	vpmull.p64	$Xmn,$Hhl,$t1
	veor		$t2,$t2,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H2,$IN		@ H^2.hi·Xi.hi
	veor		$Xl,$Xl,$Xln		@ accumulate
	vpmull2.p64	$Xm,$Hhl,$t2		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64		{$t0},[$inp],$inc	@ load [rotated] I[i+2]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[i+3]
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$In,$t1,$t1,#8
	vext.8		$IN,$t0,$t0,#8
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$IN,$IN,$Xh		@ accumulate $IN early
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there were at least 32 more bytes
	vext.8		$IN,$t0,$t0,#8		@ re-construct $IN
	adds		$len,$len,#32		@ re-construct $len
	veor		$Xl,$Xl,$Xh		@ re-construct $Xl
	b.eq		.Ldone_v8		@ is $len zero?
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t0,$t2		@ $t1 is rotated inp^Xi
	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
$code.=<<___		if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
.size	gcm_ghash_v8,.-gcm_ghash_v8
if ($flavour =~ /64/) {				# 4x subroutine
my ($I0,$j1,$j2,$j3,
    $I1,$I2,$I3,$H3,$H34,$H4,$Yl,$Ym,$Yh) = map("q$_",(4..7,15..23));
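# Process four input blocks per iteration, amortizing one reduction over
# four multiplications:
#
#   Xi+4 = [H^4*(Ii+Xi) + H^3*Ii+1 + H^2*Ii+2 + H*Ii+3] mod P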
.type	gcm_ghash_v8_4x,%function
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
	vld1.64		{$H-$H2},[$Htbl],#48	@ load twisted H, ..., H^2
	vld1.64		{$H3-$H4},[$Htbl]	@ load twisted H^3, ..., H^4
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
	vld1.64		{$I0-$j3},[$inp],#64
	vext.8		$I3,$j3,$j3,#8
	vext.8		$I2,$j2,$j2,#8
	vext.8		$I1,$j1,$j1,#8
	vpmull.p64	$Yl,$H,$I3		@ H·Ii+3
	vpmull2.p64	$Yh,$H,$I3
	vpmull.p64	$Ym,$Hhl,$j3
	vpmull.p64	$t0,$H2,$I2		@ H^2·Ii+2
	vpmull2.p64	$I2,$H2,$I2
	vpmull2.p64	$j2,$Hhl,$j2
	vpmull.p64	$j3,$H3,$I1		@ H^3·Ii+1
	vpmull2.p64	$I1,$H3,$I1
	vpmull.p64	$j1,$H34,$j1
	vld1.64		{$I0-$j3},[$inp],#64
	vext.8		$IN,$t0,$t0,#8
	vpmull.p64	$Xl,$H4,$IN		@ H^4·(Xi+Ii)
	vpmull2.p64	$Xh,$H4,$IN
	vext.8		$I3,$j3,$j3,#8
	vpmull2.p64	$Xm,$H34,$t0
	vext.8		$I2,$j2,$j2,#8
	vext.8		$I1,$j1,$j1,#8
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$Yl,$H,$I3		@ H·Ii+3
	vpmull2.p64	$Yh,$H,$I3
	vpmull.p64	$Ym,$Hhl,$j3
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vpmull.p64	$t0,$H2,$I2		@ H^2·Ii+2
	vpmull2.p64	$I2,$H2,$I2
	vpmull2.p64	$j2,$Hhl,$j2
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vpmull.p64	$j3,$H3,$I1		@ H^3·Ii+1
	vpmull2.p64	$I1,$H3,$I1
	vpmull.p64	$j1,$H34,$j1
	vext.8		$Xl,$Xl,$Xl,#8
	vext.8		$IN,$t0,$t0,#8
	vpmull.p64	$Xl,$H4,$IN		@ H^4·(Xi+Ii)
	vpmull2.p64	$Xh,$H4,$IN
	vpmull2.p64	$Xm,$H34,$t0
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vld1.64		{$I0-$j2},[$inp]
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$I2,$j2,$j2,#8
	vext.8		$I1,$j1,$j1,#8
	vpmull.p64	$Yl,$H,$I2		@ H·Ii+2
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vpmull2.p64	$Yh,$H,$I2
	vpmull.p64	$Ym,$Hhl,$j2
	vpmull.p64	$j3,$H2,$I1		@ H^2·Ii+1
	vext.8		$Xl,$Xl,$Xl,#8
	vpmull2.p64	$I1,$H2,$I1
	vpmull2.p64	$j1,$Hhl,$j1
	vext.8		$IN,$t0,$t0,#8
	vpmull.p64	$Xl,$H3,$IN		@ H^3·(Xi+Ii)
	vpmull2.p64	$Xh,$H3,$IN
	vpmull.p64	$Xm,$H34,$t0
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vld1.64		{$I0-$j1},[$inp]
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$I1,$j1,$j1,#8
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$Xl,$Xl,$Xl,#8
	vpmull.p64	$Yl,$H,$I1		@ H·Ii+1
	vext.8		$IN,$t0,$t0,#8
	vpmull2.p64	$Yh,$H,$I1
	vpmull.p64	$Ym,$Hhl,$j1
	vpmull.p64	$Xl,$H2,$IN		@ H^2·(Xi+Ii)
	vpmull2.p64	$Xh,$H2,$IN
	vpmull2.p64	$Xm,$Hhl,$t0
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$Xl,$Xl,$Xl,#8
	vext.8		$IN,$t0,$t0,#8
	vpmull.p64	$Xl,$H,$IN
	vpmull2.p64	$Xh,$H,$IN
	vpmull.p64	$Xm,$Hhl,$t0
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
.size	gcm_ghash_v8_4x,.-gcm_ghash_v8_4x
.asciz  "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
if ($flavour =~ /64/) {			######## 64-bit code
    $arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
    sprintf "ins	v%d.d[%d],v%d.d[%d]",$1<8?$1:$1+8,($2 eq "lo")?0:1,
					     $3<8?$3:$3+8,($4 eq "lo")?0:1;
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vshr\.s/sshr\.s/o	or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;				# old->new style commentary
	# fix up remaining legacy suffixes
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;	# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;	# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
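	# (e.g. a line like "vpmull.p64 q0,q12,q3" ends up as
	#  "pmull v0.1q,v20.1d,v3.1d" after the rules above)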
	# Switch preprocessor checks to aarch64 versions.
	s/__ARME([BL])__/__AARCH64E$1__/go;
} else {				######## 32-bit code
    $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
    sprintf "vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
	    # ARMv7 instructions are always encoded little-endian, hence the
	    # byte order below; the correct solution is the .inst directive,
	    # but older assemblers don't implement it:-(
	    sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary
	# fix up remaining new-style suffixes
	s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2 $1,#0/o		or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo	or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo	or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)ret/$1bx\tlr/o;
	if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
close STDOUT or die "error closing STDOUT: $!";	# enforce flush