diff --git a/crypto/x86_64cpuid.pl b/crypto/x86_64cpuid.pl
index c2a7d72b0ee9b241993211fb0ebd51b6b09179a1..0a88c7a4ed47ae2ac4e291268d2a0b8d19a6dae5 100644
--- a/crypto/x86_64cpuid.pl
+++ b/crypto/x86_64cpuid.pl
@@ -68,20 +68,10 @@ OPENSSL_ia32_cpuid:
 .cfi_register  %rbx,%r8
 
        xor     %eax,%eax
-       mov     %eax,8(%rdi)            # clear 3rd word
+       mov     %rax,8(%rdi)            # clear extended feature flags
        cpuid
        mov     %eax,%r11d              # max value for standard query level
 
-       cmp     \$7,%eax
-       jb      .Lno_extended_info
-
-       mov     \$7,%eax
-       xor     %ecx,%ecx
-       cpuid
-       mov     %ebx,8(%rdi)
-
-.Lno_extended_info:
-
        xor     %eax,%eax
        cmp     \$0x756e6547,%ebx       # "Genu"
        setne   %al
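
This first hunk drops the early leaf-7 probe (it is redone later, once the leaf-1 flags are available), zeroes the extended-feature words in the output buffer, and records the maximum standard CPUID leaf before starting the vendor check. A minimal C sketch of that leaf-0 step, assuming GCC/Clang's <cpuid.h>; it is illustrative only and not part of the patch:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;                           /* CPUID not supported */

        unsigned int max_std_leaf = eax;        /* what the assembly keeps in %r11d */
        int vendor_genu = (ebx == 0x756e6547);  /* "Genu", start of "GenuineIntel" */

        printf("max standard leaf %u, Intel-like vendor: %d\n",
               max_std_leaf, vendor_genu);
        return 0;
    }
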
@@ -149,14 +139,26 @@ OPENSSL_ia32_cpuid:
 .Lnocacheinfo:
        mov     \$1,%eax
        cpuid
+       movd    %eax,%xmm0              # put aside processor id
        and     \$0xbfefffff,%edx       # force reserved bits to 0
        cmp     \$0,%r9d
        jne     .Lnotintel
        or      \$0x40000000,%edx       # set reserved bit#30 on Intel CPUs
        and     \$15,%ah
        cmp     \$15,%ah                # examine Family ID
-       jne     .Lnotintel
+       jne     .LnotP4
        or      \$0x00100000,%edx       # set reserved bit#20 to engage RC4_CHAR
+.LnotP4:
+       cmp     \$6,%ah
+       jne     .Lnotintel
+       and     \$0x0fff0ff0,%eax
+       cmp     \$0x00050670,%eax       # Knights Landing
+       je      .Lknights
+       cmp     \$0x00080650,%eax       # Knights Mill (according to sde)
+       jne     .Lnotintel
+.Lknights:
+       and     \$0xfbffffff,%ecx       # clear XSAVE flag to mimic Silvermont
+
 .Lnotintel:
        bt      \$28,%edx               # test hyper-threading bit
        jnc     .Lgeneric
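
The added .LnotP4/.Lknights path keeps the leaf-1 processor signature in %xmm0, masks away stepping and reserved bits with 0x0fff0ff0, and clears the XSAVE capability bit (ECX bit 26) on Knights Landing and Knights Mill so they are handled like Silvermont. A hedged C equivalent of the signature test, again via <cpuid.h>; the helper name is illustrative:

    #include <cpuid.h>

    static int is_knights(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;

        unsigned int sig = eax & 0x0fff0ff0;    /* family/model, stepping dropped */

        return sig == 0x00050670                /* Knights Landing */
            || sig == 0x00080650;               /* Knights Mill */
    }
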
@@ -175,6 +177,27 @@ OPENSSL_ia32_cpuid:
        or      %ecx,%r9d               # merge AMD XOP flag
 
        mov     %edx,%r10d              # %r9d:%r10d is copy of %ecx:%edx
+
+       cmp     \$7,%r11d
+       jb      .Lno_extended_info
+       mov     \$7,%eax
+       xor     %ecx,%ecx
+       cpuid
+       bt      \$26,%r9d               # check XSAVE bit, cleared on Knights
+       jc      .Lnotknights
+       and     \$0xfff7ffff,%ebx       # clear ADCX/ADOX flag
+.Lnotknights:
+       movd    %xmm0,%eax              # restore processor id
+       and     \$0x0fff0ff0,%eax
+       cmp     \$0x00050650,%eax       # Skylake-X
+       jne     .Lnotskylakex
+       and     \$0xfffeffff,%ebx       # ~(1<<16)
+                                       # suppress AVX512F flag on Skylake-X
+.Lnotskylakex:
+       mov     %ebx,8(%rdi)            # save extended feature flags
+       mov     %ecx,12(%rdi)
+.Lno_extended_info:
+
        bt      \$27,%r9d               # check OSXSAVE bit
        jnc     .Lclear_avx
        xor     %ecx,%ecx               # XCR0
@@ -182,10 +205,11 @@ OPENSSL_ia32_cpuid:
        and     \$0xe6,%eax             # isolate XMM, YMM and ZMM state support
        cmp     \$0xe6,%eax
        je      .Ldone
-       andl    \$0xfffeffff,8(%rdi)    # clear AVX512F, ~(1<<16)
-                                       # note that we don't touch other AVX512
-                                       # extensions, because they can be used
-                                       # with YMM (without opmasking though)
+       andl    \$0x3fdeffff,8(%rdi)    # ~(1<<31|1<<30|1<<21|1<<16)
+                                       # clear AVX512F+BW+VL+IFMA, all of
+                                       # them are EVEX-encoded, which requires
+                                       # ZMM state support even if one uses
+                                       # only XMM and YMM :-(
        and     \$6,%eax                # isolate XMM and YMM state support
        cmp     \$6,%eax
        je      .Ldone
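
The new mask is stricter than the old single-bit clear: if XCR0 does not report opmask/ZMM state on top of XMM and YMM (mask 0xe6), every EVEX-encoded AVX512 bit saved at 8(%rdi) is cleared, not just AVX512F, because EVEX instructions need ZMM state even when used at XMM/YMM width. A C sketch of the OSXSAVE/XCR0 gate with illustrative helper names; the xgetbv read is done with inline asm:

    #include <cpuid.h>

    /* Read XCR0: xgetbv with ECX = 0 returns the enabled state mask in EDX:EAX. */
    static unsigned long long read_xcr0(void)
    {
        unsigned int lo, hi;

        __asm__ volatile ("xgetbv" : "=a" (lo), "=d" (hi) : "c" (0));
        return ((unsigned long long)hi << 32) | lo;
    }

    /* 0x06: XMM+YMM state (enough for AVX); 0xe6: also opmask/ZMM state,
     * required before trusting any EVEX-encoded AVX512 feature bit. */
    static int os_enables(unsigned long long state_mask)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        if (!(ecx & (1u << 27)))                /* OSXSAVE not set, xgetbv would fault */
            return 0;
        return (read_xcr0() & state_mask) == state_mask;
    }
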
@@ -193,7 +217,7 @@ OPENSSL_ia32_cpuid:
        mov     \$0xefffe7ff,%eax       # ~(1<<28|1<<12|1<<11)
        and     %eax,%r9d               # clear AVX, FMA and AMD XOP bits
        mov     \$0x3fdeffdf,%eax       # ~(1<<31|1<<30|1<<21|1<<16|1<<5)
-       and     %eax,8(%rdi)            # cleax AVX2 and AVX512* bits
+       and     %eax,8(%rdi)            # clear AVX2 and AVX512* bits
 .Ldone:
        shl     \$32,%r9
        mov     %r10d,%eax
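
For context, the tail of the routine shifts the ECX copy in %r9 up by 32 bits and merges it over the EDX copy, so the caller receives the leaf-1 EDX flags in the low word of the return value and the leaf-1 ECX flags in the high word, while the leaf-7 words sit at offsets 8 and 12 of the buffer passed in %rdi. A hedged sketch of how those bits could be read back; helper names are illustrative and bit positions follow the Intel SDM:

    #include <stdint.h>

    /* cap: the 64-bit value returned by OPENSSL_ia32_cpuid()
     * vec: the capability buffer whose address was passed in %rdi */
    static int cap_has_avx(uint64_t cap)
    {
        return ((cap >> 32) >> 28) & 1;         /* leaf-1 ECX bit 28 = AVX */
    }

    static int cap_has_avx2(const uint32_t *vec)
    {
        return (vec[2] >> 5) & 1;               /* leaf-7 EBX bit 5 = AVX2 */
    }
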