# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
# other assembly modules. Just like aesv8-armx.pl this module
# supports both AArch32 and AArch64 execution modes.
#
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information].
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Cortex-A53	1.01		8.39
# Cortex-A57	1.17		7.61
#
# (*)	presented for reference/comparison purposes;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";

$Xi="x0";	# argument block
$Htbl="x1";
$inp="x2";
$len="x3";
$inc="x12";

my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));

$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=".fpu	neon\n.code	32\n"	if ($flavour !~ /64/);
################################################################################
# void gcm_init_v8(u128 Htable[16],const u64 H[2]);
#
# input:	128-bit H - secret parameter E(K,0^128)
# output:	precomputed table filled with degrees of twisted H;
#		H is twisted to handle the reversed bit order of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		data is opaque to the outside world (which allows the
#		code to be optimized independently);
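#
# For orientation, the "twisting" is multiplication of H by x in
# GF(2^128) under the bit-reflected GHASH convention. A rough portable
# C model of just this step (an illustration modeled on common GHASH
# code, not this module's output; the struct and names are hypothetical):
#
#	typedef struct { uint64_t hi, lo; } u128_t;
#
#	static u128_t twist_h(u128_t H)	/* H.hi = first 8 input bytes */
#	{
#		uint64_t carry = H.hi >> 63;	/* bit shifted out */
#		u128_t t;
#		t.hi = (H.hi << 1) | (H.lo >> 63);
#		t.lo = (H.lo << 1) | carry;	/* H<<<=1 */
#		if (carry)			/* fold carry back in */
#			t.hi ^= 0xc200000000000000ULL;
#		return t;			/* Htable[0] */
#	}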
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64		{$t1},[x1]		@ load input H
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
	vst1.64		{$H},[x0],#16		@ store Htable[0]

	@ calculate H^2
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0]		@ store Htable[1..2]
	ret
.size	gcm_init_v8,.-gcm_init_v8
################################################################################
# void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
#
# input:	Xi - current hash value;
#		Htable - table precomputed in gcm_init_v8;
# output:	Xi - next hash value;
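#
# A hedged usage sketch in C (the u128 stand-in and the wrapper are
# assumptions for illustration; real callers reach these routines
# through OpenSSL's GCM128_CONTEXT layer rather than directly):
#
#	#include <stdint.h>
#
#	typedef struct { uint64_t hi, lo; } u128;
#
#	void gcm_init_v8(u128 Htable[16], const uint64_t H[2]);
#	void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16]);
#
#	static void one_gmult(uint64_t Xi[2], const uint64_t H[2])
#	{
#		u128 Htable[16];
#		gcm_init_v8(Htable, H);		/* twisted H, H^2, ... */
#		gcm_gmult_v8(Xi, Htable);	/* Xi = Xi*H mod P */
#	}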
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64		{$t1},[$Xi]		@ load Xi
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
	vext.8		$IN,$t1,$t1,#8

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2

	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
	ret
.size	gcm_gmult_v8,.-gcm_gmult_v8
################################################################################
# void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
#
# input:	table precomputed in gcm_init_v8;
#		current hash value Xi;
#		pointer to input data;
#		length of input data in bytes, which must be divisible
#		by the block size;
# output:	next hash value Xi;
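#
# Continuing the C sketch above, a bulk call might look as follows
# (hypothetical wrapper; note that len must be a multiple of the
# 16-byte block size):
#
#	void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16],
#			  const uint8_t *inp, size_t len);
#
#	static void ghash_blocks(uint64_t Xi[2], const u128 Htable[16],
#				 const uint8_t *buf, size_t nblocks)
#	{
#		gcm_ghash_v8(Xi, Htable, buf, nblocks * 16);
#	}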
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
___
$code.=<<___		if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs		$len,$len,#32		@ see if $len is 32 or larger
	mov		$inc,#16		@ $inc is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ $inc is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64		{$H-$Hhl},[$Htbl],#32	@ load twisted H, ..., H^2
	vld1.64		{$H2},[$Htbl]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$Xl,$Xl,$Xl,#8		@ rotate Xi
	vld1.64		{$t0},[$inp],#16	@ load [rotated] I[0]
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
	vext.8		$IN,$t0,$t0,#8		@ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
___
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
	#######
	# Xi+2 = [H*(Ii+1 + Xi+1)] mod P
	#      = [(H*Ii+1) + (H*Xi+1)] mod P
	#      = [(H*Ii+1) + H^2*(Ii+Xi)] mod P
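	#
	# The last line substitutes Xi+1 = [H*(Ii + Xi)] mod P, so each
	# iteration of the loop below folds two input blocks with only
	# one reduction. A scalar C-like sketch (helper names are
	# hypothetical, carry-less GF(2^128) ops assumed):
	#
	#	for (i = 0; i < nblocks; i += 2)
	#		Xi = reduce(clmul(H2, Xi ^ I[i]) ^
	#			    clmul(H,  I[i+1]));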
$code.=<<___;
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
	vext.8		$In,$t1,$t1,#8
	veor		$IN,$IN,$Xl		@ I[i]^=Xi
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In

.align	4
.Loop_mod2x_v8:
	vext.8		$t2,$IN,$IN,#8
	subs		$len,$len,#32		@ is there more data?
	vpmull.p64	$Xl,$H2,$IN		@ H^2.lo·Xi.lo
	cclr		$inc,lo			@ is it time to zero $inc?

	vpmull.p64	$Xmn,$Hhl,$t1
	veor		$t2,$t2,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H2,$IN		@ H^2.hi·Xi.hi
	veor		$Xl,$Xl,$Xln		@ accumulate
	vpmull2.p64	$Xm,$Hhl,$t2		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64		{$t0},[$inp],$inc	@ load [rotated] I[i+2]
	cclr		$inc,eq			@ is it time to zero $inc?

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[i+3]
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$In,$t1,$t1,#8
	vext.8		$IN,$t0,$t0,#8
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$IN,$IN,$Xh		@ accumulate $IN early

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there were at least 32 more bytes
	vext.8		$IN,$t0,$t0,#8		@ re-construct $IN
	adds		$len,$len,#32		@ re-construct $len
	veor		$Xl,$Xl,$Xh		@ re-construct $Xl
	b.eq		.Ldone_v8		@ is $len zero?
___
}
$code.=<<___;
.Lodd_tail_v8:
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t0,$t2		@ $t1 is rotated inp^Xi

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2

.Ldone_v8:
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
___
$code.=<<___		if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	ret
.size	gcm_ghash_v8,.-gcm_ghash_v8
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
___
if ($flavour =~ /64/) {			######## 64-bit code
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vshr\.s/sshr\.s/o	or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;				# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[uis]?32//o		and s/\.16b/\.4s/go;
	m/\.p64/o		and s/\.16b/\.1q/o;	# 1st pmull argument
	m/l\.p64/o		and s/\.16b/\.1d/go;	# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o	and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
} else {				######## 32-bit code
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	if ($mnemonic =~ "2");
	    # ARMv7 instructions are always encoded little-endian, hence
	    # the byte order below; the correct solution would be the
	    # .inst directive, but older assemblers don't implement it:-(
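	    # Worked example (the arithmetic follows directly from the
	    # expression above): plain "pmull" with registers 0, 1 and 2
	    # gives $word = 0xf2a00e00|(1<<17)|(2<<1) = 0xf2a20e04,
	    # emitted little-endian as ".byte 0x04,0x0e,0xa2,0xf2".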
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }
    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo				or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo		or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)ret/$1bx\tlr/o;

	print $_,"\n";
    }
}

close STDOUT;				# enforce flush