This is essentially an Intel 32-bit compiler tune-up. To start with all
[openssl.git] / crypto / md32_common.h
index 3c086a8aae8dc6195d97bd7664742847a9a3d00d..307ec30dfc32417eef04a0dd4e5e0050a3d5530e 100644 (file)
@@ -1,6 +1,6 @@
 /* crypto/md32_common.h */
 /* ====================================================================
- * Copyright (c) 1999 The OpenSSL Project.  All rights reserved.
+ * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -94,6 +94,8 @@
  *     in original (data) byte order, implemented externally (it
  *     actually is optional if data and host are of the same
  *     "endianness").
+ * HASH_MAKE_STRING
+ *     macro converting context variables to an ASCII hash string.
  *
  * Optional macros:
  *
  */
 #undef ROTATE
 #ifndef PEDANTIC
-# if defined(_MSC_VER)
-#  define ROTATE(a,n)     _lrotl(a,n)
-# elif defined(__GNUC__) && __GNUC__>=2
+# if defined(_MSC_VER) || defined(__ICC)
+#  define ROTATE(a,n)  _lrotl(a,n)
+# elif defined(__MWERKS__)
+#  if defined(__POWERPC__)
+#   define ROTATE(a,n) __rlwinm(a,n,0,31)
+#  elif defined(__MC68K__)
+    /* Motorola specific tweak. <appro@fy.chalmers.se> */
+#   define ROTATE(a,n) ( n<24 ? __rol(a,n) : __ror(a,32-n) )
+#  else
+#   define ROTATE(a,n) __rol(a,n)
+#  endif
+# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
   /*
    * Some GNU C inline assembler templates. Note that these are
    * rotates by *constant* number of bits! But that's exactly
    *
    *                                   <appro@fy.chalmers.se>
    */
-#  if defined(__i386)
+#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 #   define ROTATE(a,n) ({ register unsigned int ret;   \
-                               asm volatile (          \
+                               asm (                   \
                                "roll %1,%0"            \
                                : "=r"(ret)             \
                                : "I"(n), "0"(a)        \
                                : "cc");                \
                           ret;                         \
                        })
-#  elif defined(__powerpc)
+#  elif defined(__powerpc) || defined(__ppc)
 #   define ROTATE(a,n) ({ register unsigned int ret;   \
-                               asm volatile (          \
+                               asm (                   \
                                "rlwinm %0,%1,%2,0,31"  \
                                : "=r"(ret)             \
                                : "r"(a), "I"(n));      \
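
When none of the intrinsics or inline-asm templates above apply, md32_common.h
falls back to a plain C rotate along these lines (a sketch of the generic
definition, which sits outside this hunk; valid for 0 < n < 32):

	#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))

Unlike the "I"-constrained asm templates, which only accept a compile-time
constant rotate count, this form also works when n is a variable.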
  * Engage compiler specific "fetch in reverse byte order"
  * intrinsic function if available.
  */
-# if defined(__GNUC__) && __GNUC__>=2
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
   /* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
-#  if defined(__i386) && !defined(I386_ONLY)
+#  if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
 #   define BE_FETCH32(a)       ({ register unsigned int l=(a);\
-                               asm volatile (          \
+                               asm (                   \
                                "bswapl %0"             \
                                : "=r"(l) : "0"(l));    \
                          l;                            \
                        })
 #  elif defined(__powerpc)
 #   define LE_FETCH32(a)       ({ register unsigned int l;     \
-                               asm volatile (          \
+                               asm (                   \
                                "lwbrx %0,0,%1"         \
                                : "=r"(l)               \
                                : "r"(a));              \
                           l;                           \
                        })
 
-#  elif defined(__sparc) && defined(ULTRASPARC)
+#  elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
 #  define LE_FETCH32(a)        ({ register unsigned int l;             \
-                               asm volatile (                  \
+                               asm (                           \
                                "lda [%1]#ASI_PRIMARY_LITTLE,%0"\
                                : "=r"(l)                       \
                                : "r"(a));                      \
 #  endif
 #endif
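
Conceptually, the bswapl-based BE_FETCH32 on a little-endian host delivers the
value of four bytes stored most-significant first, and lwbrx/lda give LE_FETCH32
the mirror-image behaviour on big-endian hosts. A portable sketch of the former
(be_fetch32_c is an illustrative name, not part of the header):

	static unsigned int be_fetch32_c(const unsigned char *p)
		{
		/* assemble 4 bytes stored most-significant byte first */
		return ((unsigned int)p[0]<<24) | ((unsigned int)p[1]<<16) |
		       ((unsigned int)p[2]<< 8) |  (unsigned int)p[3];
		}

The intrinsics above achieve the same result with a byte-reversing load or swap
instruction instead of explicit shifts.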
 
-#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_BLOCK_DATA_ORDER_ALIGNED!=1
+#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
 #ifndef HASH_BLOCK_DATA_ORDER
 #error "HASH_BLOCK_DATA_ORDER must be defined!"
 #endif
  * Time for some action:-)
  */
 
-void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
+int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
        {
+       const unsigned char *data=data_;
        register HASH_LONG * p;
        register unsigned long l;
        int sw,sc,ew,ec;
 
-       if (len==0) return;
+       if (len==0) return 1;
 
        l=(c->Nl+(len<<3))&0xffffffffL;
        /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
@@ -444,7 +456,10 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
                                {
                                ew=(c->num>>2);
                                ec=(c->num&0x03);
-                               l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
+                               if (sc)
+                                       l=p[sw];
+                               HOST_p_c2l(data,l,sc);
+                               p[sw++]=l;
                                for (; sw < ew; sw++)
                                        {
                                        HOST_c2l(data,l); p[sw]=l;
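
In the hunk above, sc is the number of bytes already buffered in the partially
filled word p[sw], so the word is now read back only when there is something in
it worth preserving; a word that is about to be filled from scratch is no longer
loaded first. Assuming the little-endian (MD5-style) byte packing, the guarded
merge looks roughly like this in isolation (merge_word and its arguments are
illustrative only, not part of the header):

	static unsigned int merge_word(const unsigned int *buf, int sw, int sc,
				       const unsigned char **data)
		{
		unsigned int l = 0;
		int i;

		if (sc)				/* word is partially filled     */
			l = buf[sw];		/* keep the bytes already there */
		for (i = sc; i < 4; i++)	/* append the remaining bytes   */
			l |= (unsigned int)*(*data)++ << (8*i);
		return l;
		}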
@@ -454,21 +469,22 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
                                        HOST_c2l_p(data,l,ec); p[sw]=l;
                                        }
                                }
-                       return;
+                       return 1;
                        }
                }
 
        sw=len/HASH_CBLOCK;
        if (sw > 0)
                {
-#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_BLOCK_DATA_ORDER_ALIGNED!=1
+#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
                /*
                 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
                 * only if sizeof(HASH_LONG)==4.
                 */
                if ((((unsigned long)data)%4) == 0)
                        {
-                       HASH_BLOCK_DATA_ORDER_ALIGNED (c,data,sw);
+                       /* data is properly aligned so that we can cast it: */
+                       HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
                        sw*=HASH_CBLOCK;
                        data+=sw;
                        len-=sw;
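
The (const HASH_LONG *) cast above is only reached through the modulo-4 test,
which is what keeps the word-oriented block routine from performing misaligned
loads. The check in isolation (is_word_aligned is an illustrative helper, not
part of the file; HASH_BLOCK_DATA_ORDER_ALIGNED is only defined when
sizeof(HASH_LONG)==4, so 4-byte alignment is the relevant boundary):

	static int is_word_aligned(const void *p)
		{
		return (((unsigned long)p) % 4) == 0;
		}

Unaligned input falls through to the byte-oriented HASH_BLOCK_DATA_ORDER path
instead.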
@@ -507,14 +523,16 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
                HOST_c2l_p(data,l,ec);
                *p=l;
                }
+       return 1;
        }
 
 
 void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
        {
-#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_BLOCK_DATA_ORDER_ALIGNED!=1
+#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
        if ((((unsigned long)data)%4) == 0)
-               HASH_BLOCK_DATA_ORDER_ALIGNED (c,data,1);
+               /* data is properly aligned so that we can cast it: */
+               HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
        else
 #if !defined(HASH_BLOCK_DATA_ORDER)
                {
@@ -529,7 +547,7 @@ void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
        }
 
 
-void HASH_FINAL (unsigned char *md, HASH_CTX *c)
+int HASH_FINAL (unsigned char *md, HASH_CTX *c)
        {
        register HASH_LONG *p;
        register unsigned long l;
@@ -579,14 +597,41 @@ void HASH_FINAL (unsigned char *md, HASH_CTX *c)
 #endif
        HASH_BLOCK_HOST_ORDER (c,p,1);
 
-       l=c->A; HOST_l2c(l,md);
-       l=c->B; HOST_l2c(l,md);
-       l=c->C; HOST_l2c(l,md);
-       l=c->D; HOST_l2c(l,md);
+#ifndef HASH_MAKE_STRING
+#error "HASH_MAKE_STRING must be defined!"
+#else
+       HASH_MAKE_STRING(c,md);
+#endif
 
        c->num=0;
        /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
         * but I'm not worried :-)
-       memset((void *)c,0,sizeof(HASH_CTX));
+       OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
         */
+       return 1;
        }
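
The per-word HOST_l2c calls removed above now live behind HASH_MAKE_STRING, so
each digest can serialize however many state words it has. For a four-word,
MD5-style context the macro amounts to something like this (a sketch; the real
definition belongs in the per-digest header, not here):

	#define HASH_MAKE_STRING(c,s)	do {	\
		unsigned long ll;		\
		ll=(c)->A; HOST_l2c(ll,(s));	\
		ll=(c)->B; HOST_l2c(ll,(s));	\
		ll=(c)->C; HOST_l2c(ll,(s));	\
		ll=(c)->D; HOST_l2c(ll,(s));	\
		} while (0)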
+
+#ifndef MD32_REG_T
+#define MD32_REG_T long
+/*
+ * This comment was originally written for MD5, which is why it
+ * discusses A-D. But it basically applies to all 32-bit digests,
+ * which is why it was moved to the common header file.
+ *
+ * In case you wonder why A-D are declared as long and not
+ * as MD5_LONG: doing so results in a slight performance
+ * boost on LP64 architectures. The catch is that we don't
+ * really care if the 32 MSBs of a 64-bit register get polluted
+ * with eventual overflows, as we *save* only the 32 LSBs in
+ * *either* case. Now declaring them long excuses the compiler
+ * from keeping the 32 MSBs zeroed, resulting in a 13% performance
+ * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
+ * Well, to be honest it should say that this *prevents*
+ * performance degradation.
+ *                             <appro@fy.chalmers.se>
+ * Apparently there are LP64 compilers that generate better
+ * code if A-D are declared int, GCC targeting x86_64 being
+ * the most notable example.
+ *                             <appro@fy.chalmers.se>
+ */
+#endif
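
Schematically, an MD5-style digest uses MD32_REG_T like this (an illustrative
sketch, not part of the header): the block function keeps the working variables
in MD32_REG_T and masks back down to 32 bits on store, so overflow into the
upper half of a 64-bit register is simply discarded, exactly as the comment
above describes.

	register MD32_REG_T A,B,C,D;
	A=c->A; B=c->B; C=c->C; D=c->D;
	/* ... the usual 32-bit rounds on A..D ... */
	c->A=(c->A+A)&0xffffffffL;	/* only the 32 LSBs are kept */
	c->B=(c->B+B)&0xffffffffL;	/* likewise for C and D      */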