81bc1cbf1c228b72eb69db95caf3c20848691474
[openssl.git] / crypto / aes / asm / aesv8-armx.pl
1 #! /usr/bin/env perl
2 # Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the Apache License 2.0 (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # This module implements support for ARMv8 AES instructions. The
18 # module is endian-agnostic in sense that it supports both big- and
19 # little-endian cases. As does it support both 32- and 64-bit modes
20 # of operation. Latter is achieved by limiting amount of utilized
21 # registers to 16, which implies additional NEON load and integer
22 # instructions. This has no effect on mighty Apple A7, where results
23 # are literally equal to the theoretical estimates based on AES
24 # instruction latencies and issue rates. On Cortex-A53, an in-order
25 # execution core, this costs up to 10-15%, which is partially
26 # compensated by implementing dedicated code path for 128-bit
27 # CBC encrypt case. On Cortex-A57 parallelizable mode performance
28 # seems to be limited by sheer amount of NEON instructions...
29 #
30 # Performance in cycles per byte processed with 128-bit key:
31 #
32 #               CBC enc         CBC dec         CTR
33 # Apple A7      2.39            1.20            1.20
34 # Cortex-A53    1.32            1.29            1.46
35 # Cortex-A57(*) 1.95            0.85            0.93
36 # Denver        1.96            0.86            0.80
37 # Mongoose      1.33            1.20            1.20
38 # Kryo          1.26            0.94            1.00
39 #
40 # (*)   original 3.64/1.34/1.32 results were for r0p0 revision
41 #       and are still same even for updated module;
42
# Command-line interface shared by all perlasm scripts: the first
# argument ("flavour", e.g. linux64/win64/ios32) selects 32- vs 64-bit
# code generation and assembler dialect, the second is the output path.
$flavour = shift;
$output  = shift;

# Locate the arm-xlate.pl translator next to this script, falling back
# to the shared perlasm directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

# Pipe everything printed to STDOUT through the translator.  The pipe
# open was previously unchecked; dying here keeps a broken perl/xlate
# invocation from silently producing an empty assembly file.
open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

# Symbol prefix for all generated global functions (aes_v8_*).
$prefix="aes_v8";

# Windows ARMASM spells the byte-emitting directive "DCB", GNU as uses
# ".byte"; used by the 32-bit INST() macro below.
$_byte = ($flavour =~ /win/ ? "DCB" : ".byte");
# Start of the generated file: everything is wrapped in
# __ARM_MAX_ARCH__>=7 so the module compiles away on older targets.
$code=<<___;
#include "arm_arch.h"

#if __ARM_MAX_ARCH__>=7
___
# 64-bit output can name the crypto extension directly.  The 32-bit
# output stays at .arch armv7-a and hand-encodes the AES instructions
# through the INST() macro below (see the 32-bit post-processing pass
# at the end of this file), so that assemblers without ARMv8 support
# still work.  Under Thumb-2 the four bytes are permuted (c,d|0xc,a,b)
# to produce the same little-endian instruction encoding.
$code.=".arch   armv8-a+crypto\n.text\n"                if ($flavour =~ /64/);
$code.=<<___                                            if ($flavour !~ /64/);
.arch   armv7-a // don't confuse not-so-latest binutils with argv8 :-)
.fpu    neon
#ifdef  __thumb2__
.syntax unified
.thumb
# define INST(a,b,c,d)  $_byte  c,d|0xc,a,b
#else
.code   32
# define INST(a,b,c,d)  $_byte  a,b,c,d
#endif

.text
___
79 # Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax,
80 # NEON is mostly 32-bit mnemonics, integer - mostly 64. Goal is to
81 # maintain both 32- and 64-bit codes within single module and
# transliterate common code to either flavour with regex voodoo.
83 #
{{{
# Key-schedule routines: ${prefix}_set_encrypt_key and
# ${prefix}_set_decrypt_key.
#
# Integer registers: $inp = user key, $bits = key length in bits,
# $out = AES_KEY output, $ptr doubles as table pointer and return value.
my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
# Vector registers: the 32-bit set skips q4-q7 (which alias the
# callee-saved d8-d15 -- presumably to avoid having to spill them here;
# TODO confirm against the AAPCS).
my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
        $flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));


# Constant table: AES round constants (0x01 splatted, then 0x1b for the
# high rounds) with the "rotate-n-splat" byte-permutation mask between
# them -- both halves are loaded together with a single post-indexed
# load below.
#
# set_encrypt_key returns 0 on success, -1 for NULL inp/out, -2 for a
# bit length other than 128/192/256.
$code.=<<___;
.align  5
.Lrcon:
.long   0x01,0x01,0x01,0x01
.long   0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d     // rotate-n-splat
.long   0x1b,0x1b,0x1b,0x1b

.globl  ${prefix}_set_encrypt_key
.type   ${prefix}_set_encrypt_key,%function
.align  5
${prefix}_set_encrypt_key:
.Lenc_key:
___
$code.=<<___    if ($flavour =~ /64/);
        stp     x29,x30,[sp,#-16]!
        add     x29,sp,#0
___
# Argument validation, then dispatch on key size.  Throughout the
# expansion, `aese $key,$zero` (AESE with an all-zero round key) is
# used to apply SubBytes to the rotated word -- i.e. the SubWord step
# of the schedule -- and $rcon is doubled with vshl.u8 each iteration.
$code.=<<___;
        mov     $ptr,#-1
        cmp     $inp,#0
        b.eq    .Lenc_key_abort
        cmp     $out,#0
        b.eq    .Lenc_key_abort
        mov     $ptr,#-2
        cmp     $bits,#128
        b.lt    .Lenc_key_abort
        cmp     $bits,#256
        b.gt    .Lenc_key_abort
        tst     $bits,#0x3f
        b.ne    .Lenc_key_abort

        adr     $ptr,.Lrcon
        cmp     $bits,#192

        veor    $zero,$zero,$zero
        vld1.8  {$in0},[$inp],#16
        mov     $bits,#8                // reuse $bits
        vld1.32 {$rcon,$mask},[$ptr],#32

        b.lt    .Loop128
        b.eq    .L192
        b       .L256

.align  4
.Loop128:
        vtbl.8  $key,{$in0},$mask
        vext.8  $tmp,$zero,$in0,#12
        vst1.32 {$in0},[$out],#16
        aese    $key,$zero
        subs    $bits,$bits,#1

        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
         veor   $key,$key,$rcon
        veor    $in0,$in0,$tmp
        vshl.u8 $rcon,$rcon,#1
        veor    $in0,$in0,$key
        b.ne    .Loop128

        vld1.32 {$rcon},[$ptr]

        vtbl.8  $key,{$in0},$mask
        vext.8  $tmp,$zero,$in0,#12
        vst1.32 {$in0},[$out],#16
        aese    $key,$zero

        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
         veor   $key,$key,$rcon
        veor    $in0,$in0,$tmp
        vshl.u8 $rcon,$rcon,#1
        veor    $in0,$in0,$key

        vtbl.8  $key,{$in0},$mask
        vext.8  $tmp,$zero,$in0,#12
        vst1.32 {$in0},[$out],#16
        aese    $key,$zero

        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
         veor   $key,$key,$rcon
        veor    $in0,$in0,$tmp
        veor    $in0,$in0,$key
        vst1.32 {$in0},[$out]
        add     $out,$out,#0x50

        mov     $rounds,#10
        b       .Ldone

.align  4
.L192:
        vld1.8  {$in1},[$inp],#8
        vmov.i8 $key,#8                 // borrow $key
        vst1.32 {$in0},[$out],#16
        vsub.i8 $mask,$mask,$key        // adjust the mask

.Loop192:
        vtbl.8  $key,{$in1},$mask
        vext.8  $tmp,$zero,$in0,#12
        vst1.32 {$in1},[$out],#8
        aese    $key,$zero
        subs    $bits,$bits,#1

        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in0,$in0,$tmp

        vdup.32 $tmp,${in0}[3]
        veor    $tmp,$tmp,$in1
         veor   $key,$key,$rcon
        vext.8  $in1,$zero,$in1,#12
        vshl.u8 $rcon,$rcon,#1
        veor    $in1,$in1,$tmp
        veor    $in0,$in0,$key
        veor    $in1,$in1,$key
        vst1.32 {$in0},[$out],#16
        b.ne    .Loop192

        mov     $rounds,#12
        add     $out,$out,#0x20
        b       .Ldone

.align  4
.L256:
        vld1.8  {$in1},[$inp]
        mov     $bits,#7
        mov     $rounds,#14
        vst1.32 {$in0},[$out],#16

.Loop256:
        vtbl.8  $key,{$in1},$mask
        vext.8  $tmp,$zero,$in0,#12
        vst1.32 {$in1},[$out],#16
        aese    $key,$zero
        subs    $bits,$bits,#1

        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in0,$in0,$tmp
        vext.8  $tmp,$zero,$tmp,#12
         veor   $key,$key,$rcon
        veor    $in0,$in0,$tmp
        vshl.u8 $rcon,$rcon,#1
        veor    $in0,$in0,$key
        vst1.32 {$in0},[$out],#16
        b.eq    .Ldone

        vdup.32 $key,${in0}[3]          // just splat
        vext.8  $tmp,$zero,$in1,#12
        aese    $key,$zero

        veor    $in1,$in1,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in1,$in1,$tmp
        vext.8  $tmp,$zero,$tmp,#12
        veor    $in1,$in1,$tmp

        veor    $in1,$in1,$key
        b       .Loop256

.Ldone:
        str     $rounds,[$out]
        mov     $ptr,#0

.Lenc_key_abort:
        mov     x0,$ptr                 // return value
        `"ldr   x29,[sp],#16"           if ($flavour =~ /64/)`
        ret
.size   ${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key

.globl  ${prefix}_set_decrypt_key
.type   ${prefix}_set_decrypt_key,%function
.align  5
${prefix}_set_decrypt_key:
___
# set_decrypt_key: 64-bit prologue carries return-address signing
# (paciasp/autiasp, emitted as raw .inst for old assemblers) because
# this function makes a call (bl .Lenc_key).
$code.=<<___    if ($flavour =~ /64/);
        .inst   0xd503233f              // paciasp
        stp     x29,x30,[sp,#-16]!
        add     x29,sp,#0
___
$code.=<<___    if ($flavour !~ /64/);
        stmdb   sp!,{r4,lr}
___
# Build the encrypt schedule in place, then walk it from both ends
# swapping round keys (reversing their order) while applying aesimc
# (InvMixColumns) to every key except the outermost pair; the middle
# key is handled after the loop.  Returns 0, or set_encrypt_key's
# error code unchanged.
$code.=<<___;
        bl      .Lenc_key

        cmp     x0,#0
        b.ne    .Ldec_key_abort

        sub     $out,$out,#240          // restore original $out
        mov     x4,#-16
        add     $inp,$out,x12,lsl#4     // end of key schedule

        vld1.32 {v0.16b},[$out]
        vld1.32 {v1.16b},[$inp]
        vst1.32 {v0.16b},[$inp],x4
        vst1.32 {v1.16b},[$out],#16

.Loop_imc:
        vld1.32 {v0.16b},[$out]
        vld1.32 {v1.16b},[$inp]
        aesimc  v0.16b,v0.16b
        aesimc  v1.16b,v1.16b
        vst1.32 {v0.16b},[$inp],x4
        vst1.32 {v1.16b},[$out],#16
        cmp     $inp,$out
        b.hi    .Loop_imc

        vld1.32 {v0.16b},[$out]
        aesimc  v0.16b,v0.16b
        vst1.32 {v0.16b},[$inp]

        eor     x0,x0,x0                // return value
.Ldec_key_abort:
___
$code.=<<___    if ($flavour !~ /64/);
        ldmia   sp!,{r4,pc}
___
$code.=<<___    if ($flavour =~ /64/);
        ldp     x29,x30,[sp],#16
        .inst   0xd50323bf              // autiasp
        ret
___
$code.=<<___;
.size   ${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
___
}}}
{{{
# gen_block($dir) emits ${prefix}_encrypt or ${prefix}_decrypt: a
# plain single-block routine (in, out, key schedule) with no mode of
# operation around it.  $dir is "en" or "de" and selects the
# aese/aesmc vs. aesd/aesimc instruction pair.
#
# Note: this was originally declared `sub gen_block ()` -- an empty
# prototype, wrong for a sub taking an argument -- and only worked
# because the `&gen_block(...)` call syntax bypasses prototypes.  The
# prototype is dropped and the calls use plain syntax.
sub gen_block {
my $dir = shift;
my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
my ($inp,$out,$key)=map("x$_",(0..2));
my $rounds="w3";
my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));        # q3 unused

# Round count is read from offset 240 of the key schedule; the loop
# consumes round keys two at a time, pre-loading the next pair, and the
# final AddRoundKey is a veor with the last key.
$code.=<<___;
.globl  ${prefix}_${dir}crypt
.type   ${prefix}_${dir}crypt,%function
.align  5
${prefix}_${dir}crypt:
        ldr     $rounds,[$key,#240]
        vld1.32 {$rndkey0},[$key],#16
        vld1.8  {$inout},[$inp]
        sub     $rounds,$rounds,#2
        vld1.32 {$rndkey1},[$key],#16

.Loop_${dir}c:
        aes$e   $inout,$rndkey0
        aes$mc  $inout,$inout
        vld1.32 {$rndkey0},[$key],#16
        subs    $rounds,$rounds,#2
        aes$e   $inout,$rndkey1
        aes$mc  $inout,$inout
        vld1.32 {$rndkey1},[$key],#16
        b.gt    .Loop_${dir}c

        aes$e   $inout,$rndkey0
        aes$mc  $inout,$inout
        vld1.32 {$rndkey0},[$key]
        aes$e   $inout,$rndkey1
        veor    $inout,$inout,$rndkey0

        vst1.8  {$inout},[$out]
        ret
.size   ${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
___
}
gen_block("en");
gen_block("de");
}}}
{{{
# ${prefix}_cbc_encrypt(inp, out, len, key, ivec, enc): CBC en- or
# decryption selected at run time by the 6th argument.
my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
# $rounds aliases $enc (w5): once the enc/dec branch is taken the flag
# register is reused for the round count.
my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));

my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
# Dedicated pointers into the key schedule for the encrypt path; note
# $key4/$key5 alias $cnt/$step1 and $key7 aliases $key.
my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);

### q8-q15      preloaded key schedule

$code.=<<___;
.globl  ${prefix}_cbc_encrypt
.type   ${prefix}_cbc_encrypt,%function
.align  5
${prefix}_cbc_encrypt:
___
$code.=<<___    if ($flavour =~ /64/);
        stp     x29,x30,[sp,#-16]!
        add     x29,sp,#0
___
# 32-bit flavour: 5th/6th arguments arrive on the stack, and d8-d15
# must be preserved per the AAPCS.
$code.=<<___    if ($flavour !~ /64/);
        mov     ip,sp
        stmdb   sp!,{r4-r8,lr}
        vstmdb  sp!,{d8-d15}            @ ABI specification says so
        ldmia   ip,{r4-r5}              @ load remaining args
___
# Common setup: load IV and first block, preload the whole key
# schedule into q8-q15 (first two and last seven round keys), then
# branch to the decrypt path or fall through to encrypt.  cclr (mapped
# to csel/conditional mov by the post-processor) zeroes the input step
# when exactly one block remains.
$code.=<<___;
        subs    $len,$len,#16
        mov     $step,#16
        b.lo    .Lcbc_abort
        cclr    $step,eq

        cmp     $enc,#0                 // en- or decrypting?
        ldr     $rounds,[$key,#240]
        and     $len,$len,#-16
        vld1.8  {$ivec},[$ivp]
        vld1.8  {$dat},[$inp],$step

        vld1.32 {q8-q9},[$key]          // load key schedule...
        sub     $rounds,$rounds,#6
        add     $key_,$key,x5,lsl#4     // pointer to last 7 round keys
        sub     $rounds,$rounds,#2
        vld1.32 {q10-q11},[$key_],#32
        vld1.32 {q12-q13},[$key_],#32
        vld1.32 {q14-q15},[$key_],#32
        vld1.32 {$rndlast},[$key_]

        add     $key_,$key,#32
        mov     $cnt,$rounds
        b.eq    .Lcbc_dec

        cmp     $rounds,#2
        veor    $dat,$dat,$ivec
        veor    $rndzero_n_last,q8,$rndlast
        b.eq    .Lcbc_enc128

        vld1.32 {$in0-$in1},[$key_]
        add     $key_,$key,#16
        add     $key4,$key,#16*4
        add     $key5,$key,#16*5
        aese    $dat,q8
        aesmc   $dat,$dat
        add     $key6,$key,#16*6
        add     $key7,$key,#16*7
        b       .Lenter_cbc_enc

.align  4
.Loop_cbc_enc:
        aese    $dat,q8
        aesmc   $dat,$dat
         vst1.8 {$ivec},[$out],#16
.Lenter_cbc_enc:
        aese    $dat,q9
        aesmc   $dat,$dat
        aese    $dat,$in0
        aesmc   $dat,$dat
        vld1.32 {q8},[$key4]
        cmp     $rounds,#4
        aese    $dat,$in1
        aesmc   $dat,$dat
        vld1.32 {q9},[$key5]
        b.eq    .Lcbc_enc192

        aese    $dat,q8
        aesmc   $dat,$dat
        vld1.32 {q8},[$key6]
        aese    $dat,q9
        aesmc   $dat,$dat
        vld1.32 {q9},[$key7]
        nop

.Lcbc_enc192:
        aese    $dat,q8
        aesmc   $dat,$dat
         subs   $len,$len,#16
        aese    $dat,q9
        aesmc   $dat,$dat
         cclr   $step,eq
        aese    $dat,q10
        aesmc   $dat,$dat
        aese    $dat,q11
        aesmc   $dat,$dat
         vld1.8 {q8},[$inp],$step
        aese    $dat,q12
        aesmc   $dat,$dat
         veor   q8,q8,$rndzero_n_last
        aese    $dat,q13
        aesmc   $dat,$dat
         vld1.32 {q9},[$key_]           // re-pre-load rndkey[1]
        aese    $dat,q14
        aesmc   $dat,$dat
        aese    $dat,q15
        veor    $ivec,$dat,$rndlast
        b.hs    .Loop_cbc_enc

        vst1.8  {$ivec},[$out],#16
        b       .Lcbc_done

.align  5
.Lcbc_enc128:
        vld1.32 {$in0-$in1},[$key_]
        aese    $dat,q8
        aesmc   $dat,$dat
        b       .Lenter_cbc_enc128
.Loop_cbc_enc128:
        aese    $dat,q8
        aesmc   $dat,$dat
         vst1.8 {$ivec},[$out],#16
.Lenter_cbc_enc128:
        aese    $dat,q9
        aesmc   $dat,$dat
         subs   $len,$len,#16
        aese    $dat,$in0
        aesmc   $dat,$dat
         cclr   $step,eq
        aese    $dat,$in1
        aesmc   $dat,$dat
        aese    $dat,q10
        aesmc   $dat,$dat
        aese    $dat,q11
        aesmc   $dat,$dat
         vld1.8 {q8},[$inp],$step
        aese    $dat,q12
        aesmc   $dat,$dat
        aese    $dat,q13
        aesmc   $dat,$dat
        aese    $dat,q14
        aesmc   $dat,$dat
         veor   q8,q8,$rndzero_n_last
        aese    $dat,q15
        veor    $ivec,$dat,$rndlast
        b.hs    .Loop_cbc_enc128

        vst1.8  {$ivec},[$out],#16
        b       .Lcbc_done
___
# CBC decrypt: unlike encrypt it has no serial dependency between
# blocks, so .Loop3x_cbc_dec processes three blocks in parallel, with
# a two-block tail path.
{
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
$code.=<<___;
.align  5
.Lcbc_dec:
        vld1.8  {$dat2},[$inp],#16
        subs    $len,$len,#32           // bias
        add     $cnt,$rounds,#2
        vorr    $in1,$dat,$dat
        vorr    $dat1,$dat,$dat
        vorr    $in2,$dat2,$dat2
        b.lo    .Lcbc_dec_tail

        vorr    $dat1,$dat2,$dat2
        vld1.8  {$dat2},[$inp],#16
        vorr    $in0,$dat,$dat
        vorr    $in1,$dat1,$dat1
        vorr    $in2,$dat2,$dat2

.Loop3x_cbc_dec:
        aesd    $dat0,q8
        aesimc  $dat0,$dat0
        aesd    $dat1,q8
        aesimc  $dat1,$dat1
        aesd    $dat2,q8
        aesimc  $dat2,$dat2
        vld1.32 {q8},[$key_],#16
        subs    $cnt,$cnt,#2
        aesd    $dat0,q9
        aesimc  $dat0,$dat0
        aesd    $dat1,q9
        aesimc  $dat1,$dat1
        aesd    $dat2,q9
        aesimc  $dat2,$dat2
        vld1.32 {q9},[$key_],#16
        b.gt    .Loop3x_cbc_dec

        aesd    $dat0,q8
        aesimc  $dat0,$dat0
        aesd    $dat1,q8
        aesimc  $dat1,$dat1
        aesd    $dat2,q8
        aesimc  $dat2,$dat2
         veor   $tmp0,$ivec,$rndlast
         subs   $len,$len,#0x30
         veor   $tmp1,$in0,$rndlast
         mov.lo x6,$len                 // x6, $cnt, is zero at this point
        aesd    $dat0,q9
        aesimc  $dat0,$dat0
        aesd    $dat1,q9
        aesimc  $dat1,$dat1
        aesd    $dat2,q9
        aesimc  $dat2,$dat2
         veor   $tmp2,$in1,$rndlast
         add    $inp,$inp,x6            // $inp is adjusted in such way that
                                        // at exit from the loop $dat1-$dat2
                                        // are loaded with last "words"
         vorr   $ivec,$in2,$in2
         mov    $key_,$key
        aesd    $dat0,q12
        aesimc  $dat0,$dat0
        aesd    $dat1,q12
        aesimc  $dat1,$dat1
        aesd    $dat2,q12
        aesimc  $dat2,$dat2
         vld1.8 {$in0},[$inp],#16
        aesd    $dat0,q13
        aesimc  $dat0,$dat0
        aesd    $dat1,q13
        aesimc  $dat1,$dat1
        aesd    $dat2,q13
        aesimc  $dat2,$dat2
         vld1.8 {$in1},[$inp],#16
        aesd    $dat0,q14
        aesimc  $dat0,$dat0
        aesd    $dat1,q14
        aesimc  $dat1,$dat1
        aesd    $dat2,q14
        aesimc  $dat2,$dat2
         vld1.8 {$in2},[$inp],#16
        aesd    $dat0,q15
        aesd    $dat1,q15
        aesd    $dat2,q15
         vld1.32 {q8},[$key_],#16       // re-pre-load rndkey[0]
         add    $cnt,$rounds,#2
        veor    $tmp0,$tmp0,$dat0
        veor    $tmp1,$tmp1,$dat1
        veor    $dat2,$dat2,$tmp2
         vld1.32 {q9},[$key_],#16       // re-pre-load rndkey[1]
        vst1.8  {$tmp0},[$out],#16
         vorr   $dat0,$in0,$in0
        vst1.8  {$tmp1},[$out],#16
         vorr   $dat1,$in1,$in1
        vst1.8  {$dat2},[$out],#16
         vorr   $dat2,$in2,$in2
        b.hs    .Loop3x_cbc_dec

        cmn     $len,#0x30
        b.eq    .Lcbc_done
        nop

.Lcbc_dec_tail:
        aesd    $dat1,q8
        aesimc  $dat1,$dat1
        aesd    $dat2,q8
        aesimc  $dat2,$dat2
        vld1.32 {q8},[$key_],#16
        subs    $cnt,$cnt,#2
        aesd    $dat1,q9
        aesimc  $dat1,$dat1
        aesd    $dat2,q9
        aesimc  $dat2,$dat2
        vld1.32 {q9},[$key_],#16
        b.gt    .Lcbc_dec_tail

        aesd    $dat1,q8
        aesimc  $dat1,$dat1
        aesd    $dat2,q8
        aesimc  $dat2,$dat2
        aesd    $dat1,q9
        aesimc  $dat1,$dat1
        aesd    $dat2,q9
        aesimc  $dat2,$dat2
        aesd    $dat1,q12
        aesimc  $dat1,$dat1
        aesd    $dat2,q12
        aesimc  $dat2,$dat2
         cmn    $len,#0x20
        aesd    $dat1,q13
        aesimc  $dat1,$dat1
        aesd    $dat2,q13
        aesimc  $dat2,$dat2
         veor   $tmp1,$ivec,$rndlast
        aesd    $dat1,q14
        aesimc  $dat1,$dat1
        aesd    $dat2,q14
        aesimc  $dat2,$dat2
         veor   $tmp2,$in1,$rndlast
        aesd    $dat1,q15
        aesd    $dat2,q15
        b.eq    .Lcbc_dec_one
        veor    $tmp1,$tmp1,$dat1
        veor    $tmp2,$tmp2,$dat2
         vorr   $ivec,$in2,$in2
        vst1.8  {$tmp1},[$out],#16
        vst1.8  {$tmp2},[$out],#16
        b       .Lcbc_done

.Lcbc_dec_one:
        veor    $tmp1,$tmp1,$dat2
         vorr   $ivec,$in2,$in2
        vst1.8  {$tmp1},[$out],#16

.Lcbc_done:
        vst1.8  {$ivec},[$ivp]
.Lcbc_abort:
___
}
$code.=<<___    if ($flavour !~ /64/);
        vldmia  sp!,{d8-d15}
        ldmia   sp!,{r4-r8,pc}
___
$code.=<<___    if ($flavour =~ /64/);
        ldr     x29,[sp],#16
        ret
___
$code.=<<___;
.size   ${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
___
}}}
{{{
# ${prefix}_ctr32_encrypt_blocks(inp, out, blocks, key, ivec): CTR mode
# with a 32-bit big-endian counter in the last word of the IV, as used
# by GCM.  Counter-rollover into the upper words is NOT handled -- the
# caller is expected to manage that (standard perlasm CTR32 contract;
# confirm against the C-side wrapper).
my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
my ($rounds,$cnt,$key_)=("w5","w6","x7");
my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
my $step="x12";         # aliases with $tctr2

my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));

my ($dat,$tmp)=($dat0,$tmp0);

### q8-q15      preloaded key schedule

$code.=<<___;
.globl  ${prefix}_ctr32_encrypt_blocks
.type   ${prefix}_ctr32_encrypt_blocks,%function
.align  5
${prefix}_ctr32_encrypt_blocks:
___
$code.=<<___    if ($flavour =~ /64/);
        stp             x29,x30,[sp,#-16]!
        add             x29,sp,#0
___
# 32-bit flavour: 5th argument arrives on the stack; d8-d15 are
# callee-saved per the AAPCS.
$code.=<<___    if ($flavour !~ /64/);
        mov             ip,sp
        stmdb           sp!,{r4-r10,lr}
        vstmdb          sp!,{d8-d15}            @ ABI specification says so
        ldr             r4, [ip]                @ load remaining arg
___
# Setup: counter word is byte-reversed to host order for arithmetic
# (rev skipped on big-endian), three counter blocks $dat0-$dat2 are
# prepared and the main loop keystreams three blocks per iteration;
# one or two trailing blocks fall through to .Lctr32_tail.
$code.=<<___;
        ldr             $rounds,[$key,#240]

        ldr             $ctr, [$ivp, #12]
        vld1.32         {$dat0},[$ivp]

        vld1.32         {q8-q9},[$key]          // load key schedule...
        sub             $rounds,$rounds,#4
        mov             $step,#16
        cmp             $len,#2
        add             $key_,$key,x5,lsl#4     // pointer to last 5 round keys
        sub             $rounds,$rounds,#2
        vld1.32         {q12-q13},[$key_],#32
        vld1.32         {q14-q15},[$key_],#32
        vld1.32         {$rndlast},[$key_]
        add             $key_,$key,#32
        mov             $cnt,$rounds
        cclr            $step,lo
#ifndef __ARMEB__
        rev             $ctr, $ctr
#endif
        vorr            $dat1,$dat0,$dat0
        add             $tctr1, $ctr, #1
        vorr            $dat2,$dat0,$dat0
        add             $ctr, $ctr, #2
        vorr            $ivec,$dat0,$dat0
        rev             $tctr1, $tctr1
        vmov.32         ${dat1}[3],$tctr1
        b.ls            .Lctr32_tail
        rev             $tctr2, $ctr
        sub             $len,$len,#3            // bias
        vmov.32         ${dat2}[3],$tctr2
        b               .Loop3x_ctr32

.align  4
.Loop3x_ctr32:
        aese            $dat0,q8
        aesmc           $dat0,$dat0
        aese            $dat1,q8
        aesmc           $dat1,$dat1
        aese            $dat2,q8
        aesmc           $dat2,$dat2
        vld1.32         {q8},[$key_],#16
        subs            $cnt,$cnt,#2
        aese            $dat0,q9
        aesmc           $dat0,$dat0
        aese            $dat1,q9
        aesmc           $dat1,$dat1
        aese            $dat2,q9
        aesmc           $dat2,$dat2
        vld1.32         {q9},[$key_],#16
        b.gt            .Loop3x_ctr32

        aese            $dat0,q8
        aesmc           $tmp0,$dat0
        aese            $dat1,q8
        aesmc           $tmp1,$dat1
         vld1.8         {$in0},[$inp],#16
         vorr           $dat0,$ivec,$ivec
        aese            $dat2,q8
        aesmc           $dat2,$dat2
         vld1.8         {$in1},[$inp],#16
         vorr           $dat1,$ivec,$ivec
        aese            $tmp0,q9
        aesmc           $tmp0,$tmp0
        aese            $tmp1,q9
        aesmc           $tmp1,$tmp1
         vld1.8         {$in2},[$inp],#16
         mov            $key_,$key
        aese            $dat2,q9
        aesmc           $tmp2,$dat2
         vorr           $dat2,$ivec,$ivec
         add            $tctr0,$ctr,#1
        aese            $tmp0,q12
        aesmc           $tmp0,$tmp0
        aese            $tmp1,q12
        aesmc           $tmp1,$tmp1
         veor           $in0,$in0,$rndlast
         add            $tctr1,$ctr,#2
        aese            $tmp2,q12
        aesmc           $tmp2,$tmp2
         veor           $in1,$in1,$rndlast
         add            $ctr,$ctr,#3
        aese            $tmp0,q13
        aesmc           $tmp0,$tmp0
        aese            $tmp1,q13
        aesmc           $tmp1,$tmp1
         veor           $in2,$in2,$rndlast
         rev            $tctr0,$tctr0
        aese            $tmp2,q13
        aesmc           $tmp2,$tmp2
         vmov.32        ${dat0}[3], $tctr0
         rev            $tctr1,$tctr1
        aese            $tmp0,q14
        aesmc           $tmp0,$tmp0
        aese            $tmp1,q14
        aesmc           $tmp1,$tmp1
         vmov.32        ${dat1}[3], $tctr1
         rev            $tctr2,$ctr
        aese            $tmp2,q14
        aesmc           $tmp2,$tmp2
         vmov.32        ${dat2}[3], $tctr2
         subs           $len,$len,#3
        aese            $tmp0,q15
        aese            $tmp1,q15
        aese            $tmp2,q15

        veor            $in0,$in0,$tmp0
         vld1.32         {q8},[$key_],#16       // re-pre-load rndkey[0]
        vst1.8          {$in0},[$out],#16
        veor            $in1,$in1,$tmp1
         mov            $cnt,$rounds
        vst1.8          {$in1},[$out],#16
        veor            $in2,$in2,$tmp2
         vld1.32         {q9},[$key_],#16       // re-pre-load rndkey[1]
        vst1.8          {$in2},[$out],#16
        b.hs            .Loop3x_ctr32

        adds            $len,$len,#3
        b.eq            .Lctr32_done
        cmp             $len,#1
        mov             $step,#16
        cclr            $step,eq

.Lctr32_tail:
        aese            $dat0,q8
        aesmc           $dat0,$dat0
        aese            $dat1,q8
        aesmc           $dat1,$dat1
        vld1.32         {q8},[$key_],#16
        subs            $cnt,$cnt,#2
        aese            $dat0,q9
        aesmc           $dat0,$dat0
        aese            $dat1,q9
        aesmc           $dat1,$dat1
        vld1.32         {q9},[$key_],#16
        b.gt            .Lctr32_tail

        aese            $dat0,q8
        aesmc           $dat0,$dat0
        aese            $dat1,q8
        aesmc           $dat1,$dat1
        aese            $dat0,q9
        aesmc           $dat0,$dat0
        aese            $dat1,q9
        aesmc           $dat1,$dat1
         vld1.8         {$in0},[$inp],$step
        aese            $dat0,q12
        aesmc           $dat0,$dat0
        aese            $dat1,q12
        aesmc           $dat1,$dat1
         vld1.8         {$in1},[$inp]
        aese            $dat0,q13
        aesmc           $dat0,$dat0
        aese            $dat1,q13
        aesmc           $dat1,$dat1
         veor           $in0,$in0,$rndlast
        aese            $dat0,q14
        aesmc           $dat0,$dat0
        aese            $dat1,q14
        aesmc           $dat1,$dat1
         veor           $in1,$in1,$rndlast
        aese            $dat0,q15
        aese            $dat1,q15

        cmp             $len,#1
        veor            $in0,$in0,$dat0
        veor            $in1,$in1,$dat1
        vst1.8          {$in0},[$out],#16
        b.eq            .Lctr32_done
        vst1.8          {$in1},[$out]

.Lctr32_done:
___
$code.=<<___    if ($flavour !~ /64/);
        vldmia          sp!,{d8-d15}
        ldmia           sp!,{r4-r10,pc}
___
$code.=<<___    if ($flavour =~ /64/);
        ldr             x29,[sp],#16
        ret
___
$code.=<<___;
.size   ${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
___
}}}
# Close the __ARM_MAX_ARCH__ guard opened in the header.
$code.=<<___;
#endif
___
########################################
# Post-processing: the assembly above is written in a mix of 32-bit
# NEON and 64-bit integer mnemonics; these two passes transliterate
# the whole of $code into pure 64-bit or pure 32-bit syntax before
# printing it (through the arm-xlate.pl pipe on STDOUT).
if ($flavour =~ /64/) {                 ######## 64-bit code
    my %opcode = (
        "aesd"  =>      0x4e285800,     "aese"  =>      0x4e284800,
        "aesimc"=>      0x4e287800,     "aesmc" =>      0x4e286800      );

    # Hand-encode an AES instruction as a raw .inst word (register
    # numbers go in bits 0-4 and 5-9).  Currently unused -- the
    # substitution that called it is commented out below, so the
    # mnemonics are passed through to the assembler as-is.
    local *unaes = sub {
        my ($mnemonic,$arg)=@_;

        $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o   &&
        sprintf ".inst\t0x%08x\t//%s %s",
                        $opcode{$mnemonic}|$1|($2<<5),
                        $mnemonic,$arg;
    };

    foreach(split("\n",$code)) {
        s/\`([^\`]*)\`/eval($1)/geo;    # expand `...` escapes

        # q0-q7 map to v0-v7; q8 and up are shifted to v16 and up.
        s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;  # old->new registers
        s/@\s/\/\//o;                   # old->new style commentary

        #s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo     or
        s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o     or
        s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel     $2,$3,$2,$1/o   or
        s/vmov\.i8/movi/o       or      # fix up legacy mnemonics
        s/vext\.8/ext/o         or
        s/vrev32\.8/rev32/o     or
        s/vtst\.8/cmtst/o       or
        s/vshr/ushr/o           or
        s/^(\s+)v/$1/o          or      # strip off v prefix
        s/\bbx\s+lr\b/ret/o;

        # fix up remaining legacy suffixes: element-size suffixes on
        # mnemonics become arrangement specifiers on the registers.
        s/\.[ui]?8//o;
        m/\],#8/o and s/\.16b/\.8b/go;
        s/\.[ui]?32//o and s/\.16b/\.4s/go;
        s/\.[ui]?64//o and s/\.16b/\.2d/go;
        s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

        print $_,"\n";
    }
} else {                                ######## 32-bit code
    my %opcode = (
        "aesd"  =>      0xf3b00340,     "aese"  =>      0xf3b00300,
        "aesimc"=>      0xf3b003c0,     "aesmc" =>      0xf3b00380      );

    # Hand-encode an AES instruction through the INST() macro defined
    # in the emitted header (bytes listed little-endian), because old
    # 32-bit assemblers know neither the mnemonics nor .inst.
    local *unaes = sub {
        my ($mnemonic,$arg)=@_;

        if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
            my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
                                         |(($2&7)<<1) |(($2&8)<<2);
            # since ARMv7 instructions are always encoded little-endian.
            # correct solution is to use .inst directive, but older
            # assemblers don't implement it:-(
            sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
                        $word&0xff,($word>>8)&0xff,
                        ($word>>16)&0xff,($word>>24)&0xff,
                        $mnemonic,$arg;
        }
    };

    # vtbl with a q-register table/index has no direct 32-bit form:
    # split into two d-register vtbl.8 instructions.
    sub unvtbl {
        my $arg=shift;

        $arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
        sprintf "vtbl.8 d%d,{q%d},d%d\n\t".
                "vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
    }

    # Translate 64-bit lane syntax q<n>[lane] to the d-register
    # half/lane pair used by 32-bit vdup.32.
    sub unvdup32 {
        my $arg=shift;

        $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
        sprintf "vdup.32        q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }

    # Same lane translation for vmov.32 destinations.
    sub unvmov32 {
        my $arg=shift;

        $arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
        sprintf "vmov.32        d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
    }

    foreach(split("\n",$code)) {
        s/\`([^\`]*)\`/eval($1)/geo;    # expand `...` escapes

        s/\b[wx]([0-9]+)\b/r$1/go;              # new->old registers
        s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;   # new->old registers
        s/\/\/\s?/@ /o;                         # new->old style commentary

        # fix up remaining new-style suffixes
        s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo    or
        s/\],#[0-9]+/]!/o;

        s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo      or
        s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2     $1,#0/o or
        s/vtbl\.8\s+(.*)/unvtbl($1)/geo                 or
        s/vdup\.32\s+(.*)/unvdup32($1)/geo              or
        s/vmov\.32\s+(.*)/unvmov32($1)/geo              or
        s/^(\s+)b\./$1b/o                               or
        s/^(\s+)ret/$1bx\tlr/o;

        # Conditional mov under Thumb-2 needs a preceding IT block.
        if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
            print "     it      $2\n";
        }

        print $_,"\n";
    }
}
1022
# STDOUT is the pipe into arm-xlate.pl; checking close catches both
# buffered-write failures and a non-zero exit from the translator,
# which the original unchecked close would have silently dropped.
close STDOUT or die "error closing STDOUT: $!";