diff --git a/crypto/md32_common.h b/crypto/md32_common.h
index 1cb783944ea69ecdd2ed3cc9ba8234dd785ce5f7..147a7a00c30e3649ad5695cc49b3776a0720d594 100644
--- a/crypto/md32_common.h
+++ b/crypto/md32_common.h
  */
 #undef ROTATE
 #ifndef PEDANTIC
-# if defined(_MSC_VER) || defined(__ICC)
+# if defined(_MSC_VER)
 #  define ROTATE(a,n)  _lrotl(a,n)
+# elif defined(__ICC)
+#  define ROTATE(a,n)  _rotl(a,n)
 # elif defined(__MWERKS__)
 #  if defined(__POWERPC__)
 #   define ROTATE(a,n) __rlwinm(a,n,0,31)
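
All of the intrinsics in this chain compute the same 32-bit left rotation; the hunk splits ICC out from MSC presumably because _lrotl rotates an unsigned long, which is 64 bits under LP64 Unix, whereas _rotl rotates 32. A minimal portable sketch of the operation itself (rotl32 is a hypothetical name, not in the header):

/*
 * Illustrative only, not part of md32_common.h: the portable
 * rotation that _lrotl (MSC), _rotl (ICC) and __rlwinm(a,n,0,31)
 * (Metrowerks/PowerPC) each implement with a single instruction.
 */
static unsigned int rotl32(unsigned int a, unsigned int n)
{
    n &= 31;                    /* keep the shift count in 0..31 */
    return (a << n) | (a >> ((32 - n) & 31));
}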
                                asm (                   \
                                "roll %1,%0"            \
                                : "=r"(ret)             \
-                               : "I"(n), "0"(a)        \
+                               : "I"(n), "0"((unsigned int)(a))        \
                                : "cc");                \
                           ret;                         \
                        })
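
The cast added by this hunk matters because "0" is a matching constraint: the input must occupy the same register as the 32-bit output ret, so a wider long argument has to be truncated to that width first. A self-contained sketch of the same pattern, assuming gcc or clang on x86/x86_64 (ROTATE_DEMO is a hypothetical name; only ROTATE itself exists in the header):

/*
 * Self-contained version of the GNU C ROTATE above, assuming gcc
 * or clang targeting x86/x86_64.
 */
#include <stdio.h>

#define ROTATE_DEMO(a,n) ({ unsigned int ret;                     \
                            asm ("roll %1,%0"                     \
                                 : "=r"(ret)                      \
                                 : "I"(n), "0"((unsigned int)(a)) \
                                 : "cc");                         \
                            ret; })

int main(void)
{
    unsigned long wide = 0x80000001UL;  /* 64-bit on LP64 hosts */

    /*
     * The (unsigned int) cast lets the 64-bit argument bind to the
     * same 32-bit register as ret, as the "0" matching constraint
     * requires; prints 00000003.
     */
    printf("%08x\n", ROTATE_DEMO(wide, 1));
    return 0;
}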
                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
                                   *((unsigned int *)(c))=r; (c)+=4; r; })
 #   endif
+#  elif defined(__aarch64__)
+#   if defined(__BYTE_ORDER__)
+#    if defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
+#     define HOST_c2l(c,l)     ({ unsigned int r;              \
+                                  asm ("rev    %w0,%w1"        \
+                                       :"=r"(r)                \
+                                       :"r"(*((const unsigned int *)(c))));\
+                                  (c)+=4; (l)=r;               })
+#     define HOST_l2c(l,c)     ({ unsigned int r;              \
+                                  asm ("rev    %w0,%w1"        \
+                                       :"=r"(r)                \
+                                       :"r"((unsigned int)(l)));\
+                                  *((unsigned int *)(c))=r; (c)+=4; r; })
+#    elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
+#     define HOST_c2l(c,l)     ((l)=*((const unsigned int *)(c)), (c)+=4, (l))
+#     define HOST_l2c(l,c)     (*((unsigned int *)(c))=(l), (c)+=4, (l))
+#    endif
+#   endif
 #  endif
 # endif
 #endif
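
The aarch64 addition mirrors the x86 bswapl path above it: on a little-endian host, a register byte-reverse (rev) implements the big-endian data order these macros provide. A portable sketch of the same semantics, assuming nothing beyond plain C (host_c2l/host_l2c are hypothetical names):

/*
 * Illustrative only: portable equivalents of the HOST_c2l/HOST_l2c
 * pair in the big-endian data order.  The asm versions above do
 * the same with one 32-bit load/store plus a rev (aarch64) or
 * bswapl (x86).
 */
static unsigned int host_c2l(const unsigned char **c)
{
    const unsigned char *p = *c;
    *c += 4;
    return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
           ((unsigned int)p[2] <<  8) | ((unsigned int)p[3]);
}

static void host_l2c(unsigned int l, unsigned char **c)
{
    unsigned char *p = *c;
    p[0] = (unsigned char)(l >> 24);
    p[1] = (unsigned char)(l >> 16);
    p[2] = (unsigned char)(l >>  8);
    p[3] = (unsigned char)(l);
    *c += 4;
}

Note that the asm versions dereference (const unsigned int *)(c) directly, so they also rely on 32-bit loads being permitted at the buffer's alignment, which AArch64 allows for normal memory.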
@@ -383,6 +403,7 @@ int HASH_FINAL (unsigned char *md, HASH_CTX *c)
        }
 
 #ifndef MD32_REG_T
+#if defined(__alpha) || defined(__sparcv9) || defined(__mips)
 #define MD32_REG_T long
 /*
 * This comment was originally written for MD5, which is why it
@@ -400,9 +421,15 @@ int HASH_FINAL (unsigned char *md, HASH_CTX *c)
 * Well, to be honest, it should say that this *prevents*
  * performance degradation.
  *                             <appro@fy.chalmers.se>
- * Apparently there're LP64 compilers that generate better
- * code if A-D are declared int. Most notably GCC-x86_64
- * generates better code.
+ */
+#else
+/*
+ * The above is not absolute; there are LP64 compilers that generate
+ * better code if MD32_REG_T is defined as int.  The pre-processor
+ * condition above reflects the circumstances under which that
+ * conclusion was reached and is subject to further extension.
  *                             <appro@fy.chalmers.se>
  */
+#define MD32_REG_T int
+#endif
 #endif
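
To see why the register width matters, consider how the hash implementations use MD32_REG_T for their working variables. A sketch of the shape of an MD5-style round, with fallback definitions so it stands alone (F follows the conventional optimized MD5 definition; md5_f_round_demo is a hypothetical name):

/*
 * Illustrative sketch, not from the header: how MD32_REG_T shapes a
 * hash round.
 */
#ifndef MD32_REG_T
# define MD32_REG_T int
#endif
#ifndef ROTATE
# define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
#define F(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))

unsigned int md5_f_round_demo(unsigned int x)
{
    register unsigned MD32_REG_T A = 0x67452301, B = 0xefcdab89,
                                 C = 0x98badcfe, D = 0x10325476;

    A += F(B, C, D) + x + 0xd76aa478;   /* first MD5 round constant */
    A = ROTATE(A, 7);
    A += B;
    /*
     * With MD32_REG_T defined as long, LP64 compilers for alpha,
     * sparcv9 and mips keep A..D in full-width registers and avoid
     * per-operation 32-bit extensions; on x86_64, int produces
     * tighter code.  Either way only the low 32 bits are meaningful.
     */
    return (unsigned int)A;
}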