diff --git a/crypto/x86_64cpuid.pl b/crypto/x86_64cpuid.pl
index 40d42135bbc8747891b0794b9ccfe07a47c66652..6423e803b759542dcc23999a8d1c239feccec4a7 100644
--- a/crypto/x86_64cpuid.pl
+++ b/crypto/x86_64cpuid.pl
@@ -1,4 +1,11 @@
-#!/usr/bin/env perl
+#! /usr/bin/env perl
+# Copyright 2005-2018 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
 
 $flavour = shift;
 $output  = shift;
@@ -7,7 +14,12 @@ if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
 
 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-open STDOUT,"| $^X ${dir}perlasm/x86_64-xlate.pl $flavour $output";
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
 
 ($arg1,$arg2,$arg3,$arg4)=$win64?("%rcx","%rdx","%r8", "%r9") :        # Win64 order
                                 ("%rdi","%rsi","%rdx","%rcx"); # Unix order
@@ -19,7 +31,7 @@ print<<___;
        call    OPENSSL_cpuid_setup
 
 .hidden        OPENSSL_ia32cap_P
-.comm  OPENSSL_ia32cap_P,8
+.comm  OPENSSL_ia32cap_P,16,4
 
 .text
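
The `.comm OPENSSL_ia32cap_P,16,4` change grows the capability vector from 8 to 16 bytes (4-byte aligned) so the two CPUID(7) words stored at offsets 8 and 12 further down have somewhere to live. A minimal C sketch of how a consumer might test the vector, assuming the four-word layout this file establishes (OpenSSL's C code declares the symbol as a four-element array):

    /* Word layout established by the stores in this file:
     *   [0] CPUID(1).EDX          [1] CPUID(1).ECX (bit 11 = AMD XOP)
     *   [2] CPUID(7).EBX          [3] CPUID(7).ECX
     */
    extern unsigned int OPENSSL_ia32cap_P[4];

    /* Bit 19 of CPUID(7).EBX is ADX (ADCX/ADOX) -- the same bit this
     * patch clears on the Knights family below. */
    static int cpu_has_adx(void)
    {
        return (OPENSSL_ia32cap_P[2] >> 19) & 1;
    }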
 
@@ -48,12 +60,15 @@ OPENSSL_rdtsc:
 .size  OPENSSL_rdtsc,.-OPENSSL_rdtsc
 
 .globl OPENSSL_ia32_cpuid
-.type  OPENSSL_ia32_cpuid,\@abi-omnipotent
+.type  OPENSSL_ia32_cpuid,\@function,1
 .align 16
 OPENSSL_ia32_cpuid:
+.cfi_startproc
        mov     %rbx,%r8                # save %rbx
+.cfi_register  %rbx,%r8
 
        xor     %eax,%eax
+       mov     %rax,8(%rdi)            # clear extended feature flags
        cpuid
        mov     %eax,%r11d              # max value for standard query level
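
Changing the `.type` annotation from `\@abi-omnipotent` to `\@function,1` tells the xlate script the routine takes one pointer argument, so Win64 callers get the proper argument translation; the early `mov %rax,8(%rdi)` then zeroes the extended-feature words up front, in case the CPU never reaches the leaf-7 query below. Roughly how the C side drives this -- a simplified sketch of the `OPENSSL_cpuid_setup` pattern in crypto/cryptlib.c, with details elided:

    #include <stdint.h>

    extern unsigned int OPENSSL_ia32cap_P[4];
    /* Leaf-1 EDX:ECX comes back as the 64-bit return value; the leaf-7
     * words are stored through the pointer at offsets 8 and 12. */
    uint64_t OPENSSL_ia32_cpuid(unsigned int *cap);

    static void cpuid_setup_sketch(void)
    {
        uint64_t vec = OPENSSL_ia32_cpuid(OPENSSL_ia32cap_P);

        OPENSSL_ia32cap_P[0] = (unsigned int)vec;         /* CPUID(1).EDX */
        OPENSSL_ia32cap_P[1] = (unsigned int)(vec >> 32); /* CPUID(1).ECX */
    }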
 
@@ -124,13 +139,26 @@ OPENSSL_ia32_cpuid:
 .Lnocacheinfo:
        mov     \$1,%eax
        cpuid
+       movd    %eax,%xmm0              # put aside processor id
+       and     \$0xbfefffff,%edx       # force reserved bits to 0
        cmp     \$0,%r9d
        jne     .Lnotintel
-       or      \$0x00100000,%edx       # use reserved 20th bit to engage RC4_CHAR
+       or      \$0x40000000,%edx       # set reserved bit#30 on Intel CPUs
        and     \$15,%ah
        cmp     \$15,%ah                # examine Family ID
-       je      .Lnotintel
-       or      \$0x40000000,%edx       # use reserved bit to skip unrolled loop
+       jne     .LnotP4
+       or      \$0x00100000,%edx       # set reserved bit#20 to engage RC4_CHAR
+.LnotP4:
+       cmp     \$6,%ah
+       jne     .Lnotintel
+       and     \$0x0fff0ff0,%eax
+       cmp     \$0x00050670,%eax       # Knights Landing
+       je      .Lknights
+       cmp     \$0x00080650,%eax       # Knights Mill (according to sde)
+       jne     .Lnotintel
+.Lknights:
+       and     \$0xfbffffff,%ecx       # clear XSAVE flag to mimic Silvermont
+
 .Lnotintel:
        bt      \$28,%edx               # test hyper-threading bit
        jnc     .Lgeneric
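
The new Intel-specific branch decodes the processor signature from CPUID(1).EAX: masking with 0x0fff0ff0 keeps the extended family/model and base family/model fields while dropping the stepping, so one comparison matches every stepping of a part. In C terms (the eax parameter stands in for the value the assembly parks in %xmm0):

    #include <stdint.h>

    /* Signature check equivalent to the masking above: keep extended
     * family (bits 27:20), extended model (19:16), family (11:8) and
     * model (7:4); drop stepping (3:0) and the reserved/type bits. */
    static int is_knights(uint32_t eax)
    {
        uint32_t sig = eax & 0x0fff0ff0;

        return sig == 0x00050670    /* Knights Landing */
            || sig == 0x00080650;   /* Knights Mill    */
    }

On a match, the code clears the XSAVE flag (CPUID(1).ECX bit 26) so later dispatch treats these cores like Silvermont; the same cleared bit is re-checked below to strip the ADCX/ADOX flag.
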
@@ -146,26 +174,58 @@ OPENSSL_ia32_cpuid:
 .Lgeneric:
        and     \$0x00000800,%r9d       # isolate AMD XOP flag
        and     \$0xfffff7ff,%ecx
-       or      %r9d,%ecx               # merge AMD XOP flag
+       or      %ecx,%r9d               # merge AMD XOP flag
+
+       mov     %edx,%r10d              # %r9d:%r10d is copy of %ecx:%edx
 
-       shl     \$32,%rcx
-       mov     %edx,%ebx
-       or      %rcx,%rbx               # compose capability vector in %rbx
-       bt      \$27+32,%rcx            # check OSXSAVE bit
+       cmp     \$7,%r11d
+       jb      .Lno_extended_info
+       mov     \$7,%eax
+       xor     %ecx,%ecx
+       cpuid
+       bt      \$26,%r9d               # check XSAVE bit, cleared on Knights
+       jc      .Lnotknights
+       and     \$0xfff7ffff,%ebx       # clear ADCX/ADOX flag
+.Lnotknights:
+       movd    %xmm0,%eax              # restore processor id
+       and     \$0x0fff0ff0,%eax
+       cmp     \$0x00050650,%eax       # Skylake-X
+       jne     .Lnotskylakex
+       and     \$0xfffeffff,%ebx       # ~(1<<16)
+                                       # suppress AVX512F flag on Skylake-X
+.Lnotskylakex:
+       mov     %ebx,8(%rdi)            # save extended feature flags
+       mov     %ecx,12(%rdi)
+.Lno_extended_info:
+
+       bt      \$27,%r9d               # check OSXSAVE bit
        jnc     .Lclear_avx
        xor     %ecx,%ecx               # XCR0
        .byte   0x0f,0x01,0xd0          # xgetbv
+       and     \$0xe6,%eax             # isolate XMM, YMM and ZMM state support
+       cmp     \$0xe6,%eax
+       je      .Ldone
+       andl    \$0x3fdeffff,8(%rdi)    # ~(1<<31|1<<30|1<<21|1<<16)
+                                       # clear AVX512F+BW+VL+IFMA, all of
+                                       # them are EVEX-encoded, which requires
+                                       # ZMM state support even if one uses
+                                       # only XMM and YMM :-(
        and     \$6,%eax                # isolate XMM and YMM state support
        cmp     \$6,%eax
        je      .Ldone
 .Lclear_avx:
        mov     \$0xefffe7ff,%eax       # ~(1<<28|1<<12|1<<11)
-       shl     \$32,%rax
-       and     %rax,%rbx               # clear AVX, FMA and AMD XOP bits
+       and     %eax,%r9d               # clear AVX, FMA and AMD XOP bits
+       mov     \$0x3fdeffdf,%eax       # ~(1<<31|1<<30|1<<21|1<<16|1<<5)
+       and     %eax,8(%rdi)            # clear AVX2 and AVX512* bits
 .Ldone:
-       mov     %rbx,%rax
+       shl     \$32,%r9
+       mov     %r10d,%eax
        mov     %r8,%rbx                # restore %rbx
+.cfi_restore   %rbx
+       or      %r9,%rax
        ret
+.cfi_endproc
 .size  OPENSSL_ia32_cpuid,.-OPENSSL_ia32_cpuid
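
The OSXSAVE path now distinguishes two levels of OS state support: `xgetbv` (hand-encoded as 0f 01 d0 for the benefit of older assemblers) reads XCR0, and the code requires bits 1-2 (XMM and YMM) before advertising AVX, plus bits 5-7 (opmask and the two ZMM halves) -- together mask 0xe6 -- before leaving any EVEX-encoded AVX-512 flag set. A C sketch of the same probe, assuming GCC/Clang inline assembly; as in the assembly, CPUID(1).ECX bit 27 (OSXSAVE) must be confirmed before executing xgetbv:

    #include <stdint.h>

    static uint64_t read_xcr0(void)
    {
        uint32_t lo, hi;

        /* xgetbv with %ecx = 0; same hand-coded bytes as the Perl source */
        __asm__ volatile (".byte 0x0f,0x01,0xd0" : "=a"(lo), "=d"(hi) : "c"(0));
        return ((uint64_t)hi << 32) | lo;
    }

    static int os_saves_ymm(void) { return (read_xcr0() & 0x06) == 0x06; }
    /* bits 1,2,5,6,7: XMM, YMM, opmask, ZMM_Hi256, Hi16_ZMM */
    static int os_saves_zmm(void) { return (read_xcr0() & 0xe6) == 0xe6; }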
 
 .globl  OPENSSL_cleanse
@@ -202,6 +262,40 @@ OPENSSL_cleanse:
        jne     .Little
        ret
 .size  OPENSSL_cleanse,.-OPENSSL_cleanse
+
+.globl  CRYPTO_memcmp
+.type   CRYPTO_memcmp,\@abi-omnipotent
+.align  16
+CRYPTO_memcmp:
+       xor     %rax,%rax
+       xor     %r10,%r10
+       cmp     \$0,$arg3
+       je      .Lno_data
+       cmp     \$16,$arg3
+       jne     .Loop_cmp
+       mov     ($arg1),%r10
+       mov     8($arg1),%r11
+       mov     \$1,$arg3
+       xor     ($arg2),%r10
+       xor     8($arg2),%r11
+       or      %r11,%r10
+       cmovnz  $arg3,%rax
+       ret
+
+.align 16
+.Loop_cmp:
+       mov     ($arg1),%r10b
+       lea     1($arg1),$arg1
+       xor     ($arg2),%r10b
+       lea     1($arg2),$arg2
+       or      %r10b,%al
+       dec     $arg3
+       jnz     .Loop_cmp
+       neg     %rax
+       shr     \$63,%rax
+.Lno_data:
+       ret
+.size  CRYPTO_memcmp,.-CRYPTO_memcmp
 ___
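
CRYPTO_memcmp's contract is that its running time depends only on the length, never on where (or whether) the buffers differ: every byte pair is XORed and the differences are ORed into an accumulator, with the 16-byte case (a typical MAC tag size) getting a branch-free two-quadword fast path. A portable C rendering of the byte loop, for illustration (OpenSSL ships a C fallback in the same spirit):

    #include <stddef.h>

    /* Returns 0 iff the buffers match; the accumulator pattern keeps the
     * timing independent of the data being compared. */
    static int memcmp_consttime(const void *in_a, const void *in_b, size_t len)
    {
        const unsigned char *a = in_a, *b = in_b;
        unsigned char x = 0;
        size_t i;

        for (i = 0; i < len; i++)
            x |= a[i] ^ b[i];

        /* The assembly reduces with neg/shr: (0 - x) >> 63 maps any
         * nonzero accumulator to 1. */
        return x != 0;
    }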
 
 print<<___ if (!$win64);
@@ -349,4 +443,53 @@ OPENSSL_instrument_bus2:
 ___
 }
 
+sub gen_random {
+my $rdop = shift;
+print<<___;
+.globl OPENSSL_ia32_${rdop}_bytes
+.type  OPENSSL_ia32_${rdop}_bytes,\@abi-omnipotent
+.align 16
+OPENSSL_ia32_${rdop}_bytes:
+       xor     %rax, %rax      # return value
+       cmp     \$0,$arg2
+       je      .Ldone_${rdop}_bytes
+
+       mov     \$8,%r11
+.Loop_${rdop}_bytes:
+       ${rdop} %r10
+       jc      .Lbreak_${rdop}_bytes
+       dec     %r11
+       jnz     .Loop_${rdop}_bytes
+       jmp     .Ldone_${rdop}_bytes
+
+.align 16
+.Lbreak_${rdop}_bytes:
+       cmp     \$8,$arg2
+       jb      .Ltail_${rdop}_bytes
+       mov     %r10,($arg1)
+       lea     8($arg1),$arg1
+       add     \$8,%rax
+       sub     \$8,$arg2
+       jz      .Ldone_${rdop}_bytes
+       mov     \$8,%r11
+       jmp     .Loop_${rdop}_bytes
+
+.align 16
+.Ltail_${rdop}_bytes:
+       mov     %r10b,($arg1)
+       lea     1($arg1),$arg1
+       inc     %rax
+       shr     \$8,%r10
+       dec     $arg2
+       jnz     .Ltail_${rdop}_bytes
+
+.Ldone_${rdop}_bytes:
+       xor     %r10,%r10       # Clear sensitive data from register
+       ret
+.size  OPENSSL_ia32_${rdop}_bytes,.-OPENSSL_ia32_${rdop}_bytes
+___
+}
+gen_random("rdrand");
+gen_random("rdseed");
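
Both generated routines share one retry policy: each `rdrand`/`rdseed` attempt can fail (carry clear), so the code tries up to eight times per 64-bit word, stores whole quadwords while at least 8 output bytes remain, peels the tail a byte at a time, returns the number of bytes actually produced, and scrubs the work register on exit. A C sketch of the same policy using the GCC/Clang intrinsic `_rdrand64_step` (compile with -mrdrnd; `_rdseed64_step` with -mrdseed is analogous):

    #include <stddef.h>
    #include <immintrin.h>

    static size_t rdrand_bytes_sketch(unsigned char *out, size_t len)
    {
        size_t filled = 0;

        while (len > 0) {
            unsigned long long v = 0;
            int retries = 8, ok = 0;

            while (retries-- > 0 && !(ok = _rdrand64_step(&v)))
                ;               /* carry clear: transient failure, retry */
            if (!ok)
                break;          /* hardware kept failing, give up */

            size_t n = len < 8 ? len : 8;
            for (size_t i = 0; i < n; i++, v >>= 8)
                out[filled++] = (unsigned char)v;  /* tail: byte at a time */
            len -= n;
            v = 0;              /* scrub, mirroring the final xor %r10,%r10 */
        }
        return filled;          /* bytes actually produced, as in %rax */
    }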
+
 close STDOUT;  # flush