2 # Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the Apache License 2.0 (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
# GHASH for ARMv8 Crypto Extension, 64x64=128-bit polynomial multiplication.
21 # Initial version was developed in tight cooperation with Ard
22 # Biesheuvel of Linaro from bits-n-pieces from other assembly modules.
23 # Just like aesv8-armx.pl this module supports both AArch32 and
24 # AArch64 execution modes.
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information]; on AArch64, additionally use the larger register bank
# to "accommodate" 4x aggregated reduction, improving performance by
# 20-70% depending on processor.
36 # Current performance in cycles per processed byte:
38 # 64-bit PMULL 32-bit PMULL 32-bit NEON(*)
39 # Apple A7 0.58 0.92 5.62
40 # Cortex-A53 0.85 1.01 8.39
41 # Cortex-A57 0.73 1.17 7.61
42 # Denver 0.51 0.65 6.02
43 # Mongoose 0.65 1.10 8.06
46 # (*) presented for reference/comparison purposes;
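#
# For orientation, GHASH itself is the following fold over 16-byte
# blocks (an informal C sketch of the reference algorithm, not code
# used or emitted by this module; gf128_mul is a hypothetical helper
# denoting multiplication in GF(2^128) modulo the GHASH polynomial):
#
#	for (i = 0; i < len; i += 16) {
#		xor_block(Xi, inp + i);		/* Xi ^= I[i]       */
#		gf128_mul(Xi, H);		/* Xi = Xi*H mod P  */
#	}
#
# The routines below compute this fold with PMULL/PMULL2 and defer,
# i.e. "aggregate", the modular reduction across several blocks.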
$flavour = shift;
$output  = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
52 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
53 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
54 die "can't locate arm-xlate.pl";
open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
59 $Xi="x0"; # argument block
67 my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
68 my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));
69 my $_byte = ($flavour =~ /win/ ? "DCB" : ".byte");
$code=<<___;
#include "arm_arch.h"

#if __ARM_MAX_ARCH__>=7
___
$code.=".arch	armv8-a+crypto\n.text\n"	if ($flavour =~ /64/);
$code.=<<___				if ($flavour !~ /64/);
.fpu	neon
#ifdef __thumb2__
# define INST(a,b,c,d)	$_byte	c,0xef,a,b
#else
# define INST(a,b,c,d)	$_byte	a,b,c,0xf2
#endif

.text
___
91 ################################################################################
92 # void gcm_init_v8(u128 Htable[16],const u64 H[2]);
94 # input: 128-bit H - secret parameter E(K,0^128)
# output: precomputed table filled with powers of twisted H;
#		H is twisted to handle the reversed bit order of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		the data is opaque to the outside world (which allows the
#		code to be optimized independently);
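#
# The twist itself, restated informally from the code below: H is
# rotated left by one bit, H<<<=1, and xored with the constant
# 0xc2...01 whenever the carried-out bit was set, i.e.
#
#	twisted H = (H <<< 1) ^ (carry ? 0xc2...01 : 0)
#
# which is what later allows the reduction to be done in two cheap
# pmull-by-0xc2 phases.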
$code.=<<___;
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64		{$t1},[x1]		@ load input H
	vmov.i8		$xC2,#0xe1
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$IN,$t1,$t1,#8
	vshr.u64	$t2,$xC2,#63
	vdup.32		$t1,${t1}[1]
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.u64	$t2,$IN,#63
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vand		$t2,$t2,$t0
	vshl.i64	$IN,$IN,#1
	vext.8		$t2,$t2,$t2,#8
	vand		$t0,$t0,$t1
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
121 vst1.64 {$H},[x0],#16 @ store Htable[0]
	@ calculate H^2
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull.p64	$Xl,$H,$H
	veor		$t0,$t0,$H
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$H2,$Xl,$t2

	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	veor		$t1,$t1,$H2
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0],#32	@ store Htable[1..2]
___
if ($flavour =~ /64/) {
my ($t3,$Yl,$Ym,$Yh) = map("q$_",(4..7));

$code.=<<___;
154 @ calculate H^3 and H^4
155 vpmull.p64 $Xl,$H, $H2
156 vpmull.p64 $Yl,$H2,$H2
157 vpmull2.p64 $Xh,$H, $H2
158 vpmull2.p64 $Yh,$H2,$H2
159 vpmull.p64 $Xm,$t0,$t1
160 vpmull.p64 $Ym,$t1,$t1
	vext.8		$t0,$Xl,$Xh,#8		@ Karatsuba post-processing
	vext.8		$t1,$Yl,$Yh,#8
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t0
	veor		$t3,$Yl,$Yh
	veor		$Ym,$Ym,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase
	veor		$Ym,$Ym,$t3
	vpmull.p64	$t3,$Yl,$xC2

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Yh#lo,$Ym#hi
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vmov		$Ym#hi,$Yl#lo
	veor		$Xl,$Xm,$t2
	veor		$Yl,$Ym,$t3

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vext.8		$t3,$Yl,$Yl,#8
	vpmull.p64	$Xl,$Xl,$xC2
	vpmull.p64	$Yl,$Yl,$xC2
	veor		$t2,$t2,$Xh
	veor		$t3,$t3,$Yh
	veor		$H, $Xl,$t2		@ H^3
	veor		$H2,$Yl,$t3		@ H^4

	vext.8		$t0,$H, $H,#8		@ Karatsuba pre-processing
	vext.8		$t1,$H2,$H2,#8
	veor		$t0,$t0,$H
	veor		$t1,$t1,$H2
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$H-$H2},[x0]		@ store Htable[3..5]
___
}
$code.=<<___;
	ret
.size	gcm_init_v8,.-gcm_init_v8
___
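# Resulting Htable layout, derived from the stores above (slot
# descriptions informal):
#
#	Htable[0]	twisted H
#	Htable[1]	packed Karatsuba pre-processed halves of H and H^2
#	Htable[2]	twisted H^2
#	Htable[3..5]	twisted H^3, packed pre-processed halves, twisted
#			H^4 (filled on 64-bit builds only)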
201 ################################################################################
202 # void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
204 # input: Xi - current hash value;
205 # Htable - table precomputed in gcm_init_v8;
# output: Xi - next hash value;
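#
# i.e. one GHASH step, Xi = Xi*H mod P, computed with three 64x64-bit
# pmulls via Karatsuba; an informal sketch of the identity used below,
# in the notation of the code's own comments ("+" is xor):
#
#	Xl = H.lo*Xi.lo
#	Xh = H.hi*Xi.hi
#	Xm = (H.lo+H.hi)*(Xi.lo+Xi.hi) + Xl + Xh
#
# giving the 256-bit product Xh:Xm:Xl, which is then reduced mod P;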
$code.=<<___;
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64		{$t1},[$Xi]		@ load Xi
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
220 vext.8 $IN,$t1,$t1,#8
222 vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
223 veor $t1,$t1,$IN @ Karatsuba pre-processing
224 vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
225 vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

	ret
.size	gcm_gmult_v8,.-gcm_gmult_v8
___
251 ################################################################################
252 # void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
254 # input: table precomputed in gcm_init_v8;
255 # current hash value Xi;
256 # pointer to input data;
#		length of input data in bytes, which must be divisible
#		by the block size;
258 # output: next hash value Xi;
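#
# i.e. the fold Xi = (Xi + I[i])*H mod P applied over all blocks; the
# loop below aggregates two blocks per reduction (see the identity
# further down), and 64-bit builds divert lengths of 64 bytes or more
# to the 4x subroutine;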
$code.=<<___;
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
___
266 $code.=<<___ if ($flavour =~ /64/);
	cmp	$len,#64
	b.hs	.Lgcm_ghash_v8_4x
___
270 $code.=<<___ if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
274 vld1.64 {$Xl},[$Xi] @ load [rotated] Xi
275 @ "[rotated]" means that
276 @ loaded value would have
277 @ to be rotated in order to
278 @ make it appear as in
279 @ algorithm specification
280 subs $len,$len,#32 @ see if $len is 32 or larger
281 mov $inc,#16 @ $inc is used as post-
282 @ increment for input pointer;
						@ as the loop is modulo-
						@ scheduled, $inc is
						@ zeroed just in time to
						@ preclude overstepping
						@ inp[len]; this means
						@ the last block[s] are
						@ actually loaded twice,
						@ but the last copy is
						@ not processed
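						@ (e.g. once $len drops
						@ below 32, $inc becomes
						@ 0, so the trailing
						@ loads re-read the last
						@ block in place and the
						@ loop exits before the
						@ extra copy is used)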
290 vld1.64 {$H-$Hhl},[$Htbl],#32 @ load twisted H, ..., H^2
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H2},[$Htbl]
293 cclr $inc,eq @ is it time to zero $inc?
294 vext.8 $Xl,$Xl,$Xl,#8 @ rotate Xi
295 vld1.64 {$t0},[$inp],#16 @ load [rotated] I[0]
296 vshl.u64 $xC2,$xC2,#57 @ compose 0xc2.0 constant
301 vext.8 $IN,$t0,$t0,#8 @ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
___
304 { my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
 # Xi+2 = [H*(Ii+1 + Xi+1)] mod P
 #      = [(H*Ii+1) + (H*Xi+1)] mod P
 #      = [(H*Ii+1) + H^2*(Ii+Xi)] mod P
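 #
 # The benefit of the rewrite: H*Ii+1 depends only on input, so it can
 # be computed while the reduction producing Xi+1 is still in flight,
 # and only one reduction is performed per two blocks.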
$code.=<<___;
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
315 vext.8 $In,$t1,$t1,#8
316 veor $IN,$IN,$Xl @ I[i]^=Xi
317 vpmull.p64 $Xln,$H,$In @ H·Ii+1
318 veor $t1,$t1,$In @ Karatsuba pre-processing
319 vpmull2.p64 $Xhn,$H,$In
.align	4
.Loop_mod2x_v8:
	vext.8		$t2,$IN,$IN,#8
325 subs $len,$len,#32 @ is there more data?
326 vpmull.p64 $Xl,$H2,$IN @ H^2.lo·Xi.lo
327 cclr $inc,lo @ is it time to zero $inc?
329 vpmull.p64 $Xmn,$Hhl,$t1
330 veor $t2,$t2,$IN @ Karatsuba pre-processing
331 vpmull2.p64 $Xh,$H2,$IN @ H^2.hi·Xi.hi
332 veor $Xl,$Xl,$Xln @ accumulate
333 vpmull2.p64 $Xm,$Hhl,$t2 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
334 vld1.64 {$t0},[$inp],$inc @ load [rotated] I[i+2]
337 cclr $inc,eq @ is it time to zero $inc?
340 vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
343 vld1.64 {$t1},[$inp],$inc @ load [rotated] I[i+3]
348 vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
353 vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
354 vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
355 vext.8 $In,$t1,$t1,#8
356 vext.8 $IN,$t0,$t0,#8
358 vpmull.p64 $Xln,$H,$In @ H·Ii+1
359 veor $IN,$IN,$Xh @ accumulate $IN early
361 vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
362 vpmull.p64 $Xl,$Xl,$xC2
364 veor $t1,$t1,$In @ Karatsuba pre-processing
366 vpmull2.p64 $Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there were at least 32 more bytes
370 vext.8 $IN,$t0,$t0,#8 @ re-construct $IN
371 adds $len,$len,#32 @ re-construct $len
372 veor $Xl,$Xl,$Xh @ re-construct $Xl
373 b.eq .Ldone_v8 @ is $len zero?
___
}
$code.=<<___;
.Lodd_tail_v8:
	vext.8		$t2,$Xl,$Xl,#8
379 veor $IN,$IN,$Xl @ inp^=Xi
380 veor $t1,$t0,$t2 @ $t1 is rotated inp^Xi
382 vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
383 veor $t1,$t1,$IN @ Karatsuba pre-processing
384 vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
385 vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
387 vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
391 vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
393 vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
394 vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
397 vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
398 vpmull.p64 $Xl,$Xl,$xC2
.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
___
410 $code.=<<___ if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	ret
.size	gcm_ghash_v8,.-gcm_ghash_v8
___
418 if ($flavour =~ /64/) { # 4x subroutine
my ($I0,$j1,$j2,$j3,
    $I1,$I2,$I3,$H3,$H34,$H4,$Yl,$Ym,$Yh) = map("q$_",(4..7,15..23));
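# The 4x path extends the 2x identity above to four blocks per
# iteration (informal restatement; one reduction per four blocks):
#
#	Xi+4 = [H^4*(Ii+Xi) + H^3*Ii+1 + H^2*Ii+2 + H*Ii+3] mod P
#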
$code.=<<___;
.type	gcm_ghash_v8_4x,%function
.align	4
gcm_ghash_v8_4x:
.Lgcm_ghash_v8_4x:
427 vld1.64 {$Xl},[$Xi] @ load [rotated] Xi
428 vld1.64 {$H-$H2},[$Htbl],#48 @ load twisted H, ..., H^2
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H3-$H4},[$Htbl]	@ load twisted H^3, ..., H^4
431 vshl.u64 $xC2,$xC2,#57 @ compose 0xc2.0 constant
433 vld1.64 {$I0-$j3},[$inp],#64
441 vext.8 $I3,$j3,$j3,#8
442 vext.8 $I2,$j2,$j2,#8
443 vext.8 $I1,$j1,$j1,#8
445 vpmull.p64 $Yl,$H,$I3 @ H·Ii+3
447 vpmull2.p64 $Yh,$H,$I3
448 vpmull.p64 $Ym,$Hhl,$j3
450 vpmull.p64 $t0,$H2,$I2 @ H^2·Ii+2
452 vpmull2.p64 $I2,$H2,$I2
453 vpmull2.p64 $j2,$Hhl,$j2
459 vpmull.p64 $j3,$H3,$I1 @ H^3·Ii+1
461 vpmull2.p64 $I1,$H3,$I1
462 vpmull.p64 $j1,$H34,$j1
476 vld1.64 {$I0-$j3},[$inp],#64
477 vext.8 $IN,$t0,$t0,#8
485 vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
487 vpmull2.p64 $Xh,$H4,$IN
488 vext.8 $I3,$j3,$j3,#8
489 vpmull2.p64 $Xm,$H34,$t0
493 vext.8 $I2,$j2,$j2,#8
495 vext.8 $I1,$j1,$j1,#8
497 vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
499 vpmull.p64 $Yl,$H,$I3 @ H·Ii+3
502 vpmull2.p64 $Yh,$H,$I3
504 vpmull.p64 $Ym,$Hhl,$j3
506 vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
507 vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
508 vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
509 vpmull.p64 $t0,$H2,$I2 @ H^2·Ii+2
511 vpmull2.p64 $I2,$H2,$I2
513 vpmull2.p64 $j2,$Hhl,$j2
519 vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
520 vpmull.p64 $Xl,$Xl,$xC2
521 vpmull.p64 $j3,$H3,$I1 @ H^3·Ii+1
524 vpmull2.p64 $I1,$H3,$I1
525 vpmull.p64 $j1,$H34,$j1
530 vext.8 $Xl,$Xl,$Xl,#8
538 vext.8 $IN,$t0,$t0,#8
540 vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
542 vpmull2.p64 $Xh,$H4,$IN
543 vpmull2.p64 $Xm,$H34,$t0
556 vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
559 vld1.64 {$I0-$j2},[$inp]
567 vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
568 vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
569 vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
570 vext.8 $I2,$j2,$j2,#8
571 vext.8 $I1,$j1,$j1,#8
574 vpmull.p64 $Yl,$H,$I2 @ H·Ii+2
577 vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
578 vpmull.p64 $Xl,$Xl,$xC2
580 vpmull2.p64 $Yh,$H,$I2
581 vpmull.p64 $Ym,$Hhl,$j2
583 vpmull.p64 $j3,$H2,$I1 @ H^2·Ii+1
585 vext.8 $Xl,$Xl,$Xl,#8
587 vpmull2.p64 $I1,$H2,$I1
589 vpmull2.p64 $j1,$Hhl,$j1
590 vext.8 $IN,$t0,$t0,#8
596 vpmull.p64 $Xl,$H3,$IN @ H^3·(Xi+Ii)
598 vpmull2.p64 $Xh,$H3,$IN
599 vpmull.p64 $Xm,$H34,$t0
608 vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
611 vld1.64 {$I0-$j1},[$inp]
618 vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
619 vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
620 vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
621 vext.8 $I1,$j1,$j1,#8
624 vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
625 vpmull.p64 $Xl,$Xl,$xC2
628 vext.8 $Xl,$Xl,$Xl,#8
630 vpmull.p64 $Yl,$H,$I1 @ H·Ii+1
634 vext.8 $IN,$t0,$t0,#8
636 vpmull2.p64 $Yh,$H,$I1
637 vpmull.p64 $Ym,$Hhl,$j1
639 vpmull.p64 $Xl,$H2,$IN @ H^2·(Xi+Ii)
641 vpmull2.p64 $Xh,$H2,$IN
642 vpmull2.p64 $Xm,$Hhl,$t0
651 vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
660 vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
661 vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
662 vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
665 vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
666 vpmull.p64 $Xl,$Xl,$xC2
669 vext.8 $Xl,$Xl,$Xl,#8
672 vext.8 $IN,$t0,$t0,#8
674 vpmull.p64 $Xl,$H,$IN
676 vpmull2.p64 $Xh,$H,$IN
677 vpmull.p64 $Xm,$Hhl,$t0
680 vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
685 vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
686 vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
687 vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
690 vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
691 vpmull.p64 $Xl,$Xl,$xC2
694 vext.8 $Xl,$Xl,$Xl,#8
699 vst1.64 {$Xl},[$Xi] @ write out Xi
	ret
.size	gcm_ghash_v8_4x,.-gcm_ghash_v8_4x
___
}
$code.=<<___;
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#endif
___
714 if ($flavour =~ /64/) { ######## 64-bit code
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o	&&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1<8?$1:$1+8,($2 eq "lo")?0:1,
					     $3<8?$3:$3+8,($4 eq "lo")?0:1;
    }
722 foreach(split("\n",$code)) {
723 s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
724 s/vmov\.i8/movi/o or # fix up legacy mnemonics
725 s/vmov\s+(.*)/unvmov($1)/geo or
	s/vshr\.s/sshr\.s/o	or
	s/vshr/ushr/o		or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
733 s/@\s/\/\//o; # old->new style commentary
	# fix up remaining legacy suffixes
	s/\.[ui]?8(\s)/$1/o;
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
738 m/\.p64/o and s/\.16b/\.1q/o; # 1st pmull argument
739 m/l\.p64/o and s/\.16b/\.1d/go; # 2nd and 3rd pmull arguments
740 s/\.[uisp]?64//o and s/\.16b/\.2d/go;
741 s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
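	# A worked example of the rules above (comment only, not an
	# extra rewrite): "vpmull.p64 q0,q12,q3" comes out as
	# "pmull v0.1q,v20.1d,v3.1d".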
	print $_,"\n";
    }
} else {				######## 32-bit code
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o	&&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;
755 if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
756 my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
757 |(($2&7)<<17)|(($2&8)<<4)
758 |(($3&7)<<1) |(($3&8)<<2);
759 $word |= 0x00010001 if ($mnemonic =~ "2");
	    # ARMv7 instructions are always encoded little-endian, hence
	    # the byte order below. The correct solution would be to use
	    # the .inst directive, but older assemblers don't implement it:-(
763 sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
764 $word&0xff,($word>>8)&0xff,
765 ($word>>16)&0xff,($word>>24)&0xff,
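    # A worked example (comment only): unvpmullp64("pmull","q0,q12,q3")
    # yields "INST(0x86,0x0e,0xa8,0xf2)	@ pmull q0,q12,q3".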
770 foreach(split("\n",$code)) {
771 s/\b[wx]([0-9]+)\b/r$1/go; # new->old registers
772 s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
773 s/\/\/\s?/@ /o; # new->old style commentary
	# fix up remaining new-style suffixes
	s/\],#[0-9]+/]!/o;
778 s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2 $1,#0/o or
779 s/vdup\.32\s+(.*)/unvdup32($1)/geo or
780 s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo or
781 s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
783 s/^(\s+)ret/$1bx\tlr/o;
	if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
	    print "	it	$2\n";
	}

	print $_,"\n";
    }
}
793 close STDOUT; # enforce flush