Temporary workaround for IRIX64 build.
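
(Illustrative summary, not part of the commit.) This patch moves the includes to <openssl/sha.h> and <openssl/opensslv.h>, wraps the whole translation unit in #ifndef NO_SHA1, replaces ULONG/unsigned long with SHA_LONG, gives the round constants UL suffixes instead of casts, and gates the word-at-a-time fast paths on SHA_LONG_LOG2==2 (i.e. sizeof(SHA_LONG)==4), which is presumably what the 64-bit-long IRIX64 build was tripping over. It also changes the third argument of sha1_block() from a byte count to a count of SHA_CBLOCK-sized blocks; the SHA1_ASM shim converts back to bytes for the assembler routine. The sketch below only illustrates that calling-convention change: the *_demo names are invented, only SHA_CBLOCK comes from the real headers.

    #include <stdio.h>

    #define SHA_CBLOCK 64   /* one SHA-1 input block, in bytes */

    /* stand-in for the assembler entry point, which still wants bytes */
    static void sha1_block_x86_demo(int num_bytes)
            {
            printf("assembler routine sees %d bytes\n", num_bytes);
            }

    /* the patched shim: C callers now pass a block count */
    #define sha1_block_demo(n) sha1_block_x86_demo((n)*SHA_CBLOCK)

    int main(void)
            {
            sha1_block_demo(1);     /* one block    -> 64 bytes  */
            sha1_block_demo(3);     /* three blocks -> 192 bytes */
            return 0;
            }
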
[openssl.git] / crypto / sha / sha1dgst.c
diff --git a/crypto/sha/sha1dgst.c b/crypto/sha/sha1dgst.c
index d7fb4d4dfaef3d5113ee9c734554bc479ce4807b..af8d9aed8aa8bd6b1dca77aaa2817c0536897f11 100644
--- a/crypto/sha/sha1dgst.c
+++ b/crypto/sha/sha1dgst.c
 #include <string.h>
 #undef  SHA_0
 #define SHA_1
-#include "sha.h"
+#include <openssl/sha.h>
 #include "sha_locl.h"
-#include "opensslv.h"
+#include <openssl/opensslv.h>
 
+#ifndef NO_SHA1
 char *SHA1_version="SHA1" OPENSSL_VERSION_PTEXT;
 
 /* Implemented from SHA-1 document - The Secure Hash Algorithm
  */
 
-#define INIT_DATA_h0 (unsigned long)0x67452301L
-#define INIT_DATA_h1 (unsigned long)0xefcdab89L
-#define INIT_DATA_h2 (unsigned long)0x98badcfeL
-#define INIT_DATA_h3 (unsigned long)0x10325476L
-#define INIT_DATA_h4 (unsigned long)0xc3d2e1f0L
+#define INIT_DATA_h0 0x67452301UL
+#define INIT_DATA_h1 0xefcdab89UL
+#define INIT_DATA_h2 0x98badcfeUL
+#define INIT_DATA_h3 0x10325476UL
+#define INIT_DATA_h4 0xc3d2e1f0UL
 
-#define K_00_19        0x5a827999L
-#define K_20_39 0x6ed9eba1L
-#define K_40_59 0x8f1bbcdcL
-#define K_60_79 0xca62c1d6L
+#define K_00_19        0x5a827999UL
+#define K_20_39 0x6ed9eba1UL
+#define K_40_59 0x8f1bbcdcUL
+#define K_60_79 0xca62c1d6UL
 
-#ifndef NOPROTO
-#  ifdef SHA1_ASM
-     void sha1_block_x86(SHA_CTX *c, register unsigned long *p, int num);
-#    define sha1_block sha1_block_x86
-#  else
-     void sha1_block(SHA_CTX *c, register unsigned long *p, int num);
-#  endif
+#ifdef SHA1_ASM
+   void sha1_block_x86(SHA_CTX *c, register SHA_LONG *p, int num);
+#  define sha1_block(c,p,n) sha1_block_x86((c),(p),(n)*SHA_CBLOCK)
 #else
-#  ifdef SHA1_ASM
-     void sha1_block_x86();
-#    define sha1_block sha1_block_x86
-#  else
-     void sha1_block();
-#  endif
+   static void sha1_block(SHA_CTX *c, register SHA_LONG *p, int num);
 #endif
 
-
-#if defined(L_ENDIAN) && defined(SHA1_ASM)
+#if !defined(B_ENDIAN) && defined(SHA1_ASM)
 #  define      M_c2nl          c2l
 #  define      M_p_c2nl        p_c2l
 #  define      M_c2nl_p        c2l_p
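
(Illustrative note, not part of the commit.) The M_c2nl/M_p_c2nl/M_c2nl_p macros select how SHA1_Update assembles buffered input bytes into SHA_LONG words: the c2l forms keep native little-endian word order, which is what the x86 assembler path consumes, while the c2nl forms build big-endian (network-order) words for the portable C compression function. The patch widens the assembler case from "L_ENDIAN" to "anything that is not B_ENDIAN". Below is a minimal sketch of the big-endian form written as a function for clarity; the real c2nl lives in sha_locl.h as a macro and this is not its actual text.

    #include <stdio.h>

    /* illustrative only: read four bytes in network (big-endian) order
     * and advance the cursor, like a c2nl-style load */
    static unsigned long c2nl_demo(const unsigned char **c)
            {
            unsigned long l;
            l  = ((unsigned long)(*(*c)++)) << 24;
            l |= ((unsigned long)(*(*c)++)) << 16;
            l |= ((unsigned long)(*(*c)++)) <<  8;
            l |= ((unsigned long)(*(*c)++));
            return l;
            }

    int main(void)
            {
            const unsigned char buf[4] = { 0x61, 0x62, 0x63, 0x64 };
            const unsigned char *p = buf;
            printf("%08lx\n", c2nl_demo(&p));   /* prints 61626364 */
            return 0;
            }
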
@@ -123,12 +114,12 @@ void SHA1_Init(SHA_CTX *c)
        c->num=0;
        }
 
-void SHA1_Update(SHA_CTX *c, register unsigned char *data,
+void SHA1_Update(SHA_CTX *c, const register unsigned char *data,
             unsigned long len)
        {
-       register ULONG *p;
+       register SHA_LONG *p;
        int ew,ec,sw,sc;
-       ULONG l;
+       SHA_LONG l;
 
        if (len == 0) return;
 
@@ -156,7 +147,7 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
                                }
                        len-=(SHA_CBLOCK-c->num);
 
-                       sha1_block(c,p,64);
+                       sha1_block(c,p,1);
                        c->num=0;
                        /* drop through and do the rest */
                        }
@@ -193,15 +184,15 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
         * copies it to a local array.  I should be able to do this for
         * the C version as well....
         */
-#if 1
+#if SHA_LONG_LOG2==2
 #if defined(B_ENDIAN) || defined(SHA1_ASM)
-       if ((((unsigned long)data)%sizeof(ULONG)) == 0)
+       if ((((unsigned long)data)%sizeof(SHA_LONG)) == 0)
                {
                sw=len/SHA_CBLOCK;
                if (sw)
                        {
+                       sha1_block(c,(SHA_LONG *)data,sw);
                        sw*=SHA_CBLOCK;
-                       sha1_block(c,(ULONG *)data,sw);
                        data+=sw;
                        len-=sw;
                        }
@@ -213,35 +204,61 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
        p=c->data;
        while (len >= SHA_CBLOCK)
                {
-#if defined(B_ENDIAN) || defined(L_ENDIAN)
-               if (p != (unsigned long *)data)
+#if SHA_LONG_LOG2==2
+#if defined(B_ENDIAN) || defined(SHA1_ASM)
+#define SHA_NO_TAIL_CODE
+               /*
+                * Basically we get here only when data happens
+                * to be unaligned.
+                */
+               if (p != (SHA_LONG *)data)
                        memcpy(p,data,SHA_CBLOCK);
                data+=SHA_CBLOCK;
-#  ifdef L_ENDIAN
-#    ifndef SHA1_ASM /* Will not happen */
-               for (sw=(SHA_LBLOCK/4); sw; sw--)
+               sha1_block(c,p=c->data,1);
+               len-=SHA_CBLOCK;
+#elif defined(L_ENDIAN)
+#define BE_COPY(dst,src,i)     {                               \
+                               l = ((SHA_LONG *)src)[i];       \
+                               Endian_Reverse32(l);            \
+                               dst[i] = l;                     \
+                               }
+               if ((((unsigned long)data)%sizeof(SHA_LONG)) == 0)
                        {
-                       Endian_Reverse32(p[0]);
-                       Endian_Reverse32(p[1]);
-                       Endian_Reverse32(p[2]);
-                       Endian_Reverse32(p[3]);
-                       p+=4;
+                       for (sw=(SHA_LBLOCK/4); sw; sw--)
+                               {
+                               BE_COPY(p,data,0);
+                               BE_COPY(p,data,1);
+                               BE_COPY(p,data,2);
+                               BE_COPY(p,data,3);
+                               p+=4;
+                               data += 4*sizeof(SHA_LONG);
+                               }
+                       sha1_block(c,p=c->data,1);
+                       len-=SHA_CBLOCK;
+                       continue;
                        }
+#endif
+#endif
+#ifndef SHA_NO_TAIL_CODE
+               /*
+                * In addition to the sizeof(SHA_LONG) != 4 case, the
+                * following code also covers unaligned access on
+                * little-endian machines.
+                *                      <appro@fy.chalmers.se>
+                */
                p=c->data;
-#    endif
-#  endif
-#else
-               for (sw=(SHA_BLOCK/4); sw; sw--)
+               for (sw=(SHA_LBLOCK/4); sw; sw--)
                        {
-                       M_c2nl(data,l); *(p++)=l;
-                       M_c2nl(data,l); *(p++)=l;
-                       M_c2nl(data,l); *(p++)=l;
-                       M_c2nl(data,l); *(p++)=l;
+                       M_c2nl(data,l); p[0]=l;
+                       M_c2nl(data,l); p[1]=l;
+                       M_c2nl(data,l); p[2]=l;
+                       M_c2nl(data,l); p[3]=l;
+                       p+=4;
                        }
                p=c->data;
-#endif
-               sha1_block(c,p,64);
+               sha1_block(c,p,1);
                len-=SHA_CBLOCK;
+#endif
                }
        ec=(int)len;
        c->num=ec;
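
(Illustrative note, not part of the commit.) The BE_COPY macro introduced above reads one aligned SHA_LONG from the input, byte-swaps it with Endian_Reverse32(), and stores it into the context buffer; that is why the fast little-endian path first checks that data is aligned on sizeof(SHA_LONG), and why unaligned input (and any build with sizeof(SHA_LONG) != 4) falls through to the M_c2nl tail code instead. A purely illustrative 32-bit byte swap equivalent to what that path relies on, not Endian_Reverse32()'s actual definition:

    #include <stdio.h>

    /* illustrative only: reverse the byte order of a 32-bit value */
    static unsigned long swap32_demo(unsigned long l)
            {
            return ((l & 0x000000ffUL) << 24) |
                   ((l & 0x0000ff00UL) <<  8) |
                   ((l & 0x00ff0000UL) >>  8) |
                   ((l & 0xff000000UL) >> 24);
            }

    int main(void)
            {
            printf("%08lx\n", swap32_demo(0x01020304UL));   /* 04030201 */
            return 0;
            }
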
@@ -256,45 +273,59 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
 
 void SHA1_Transform(SHA_CTX *c, unsigned char *b)
        {
-       ULONG p[16];
-#ifndef B_ENDIAN
-       ULONG *q;
-       int i;
-#endif
+       SHA_LONG p[SHA_LBLOCK];
 
-#if defined(B_ENDIAN) || defined(L_ENDIAN)
-       memcpy(p,b,64);
-#ifdef L_ENDIAN
-       q=p;
-       for (i=(SHA_LBLOCK/4); i; i--)
+#if SHA_LONG_LOG2==2
+#if defined(B_ENDIAN) || defined(SHA1_ASM)
+       memcpy(p,b,SHA_CBLOCK);
+       sha1_block(c,p,1);
+       return;
+#elif defined(L_ENDIAN)
+       if (((unsigned long)b%sizeof(SHA_LONG)) == 0)
                {
-               Endian_Reverse32(q[0]);
-               Endian_Reverse32(q[1]);
-               Endian_Reverse32(q[2]);
-               Endian_Reverse32(q[3]);
-               q+=4;
+               SHA_LONG *q;
+               int i;
+
+               q=p;
+               for (i=(SHA_LBLOCK/4); i; i--)
+                       {
+                       unsigned long l;
+                       BE_COPY(q,b,0); /* BE_COPY was defined above */
+                       BE_COPY(q,b,1);
+                       BE_COPY(q,b,2);
+                       BE_COPY(q,b,3);
+                       q+=4;
+                       b+=4*sizeof(SHA_LONG);
+                       }
+               sha1_block(c,p,1);
+               return;
                }
 #endif
-#else
-       q=p;
-       for (i=(SHA_LBLOCK/4); i; i--)
+#endif
+#ifndef SHA_NO_TAIL_CODE /* defined above, see comment */
                {
-               ULONG l;
-               c2nl(b,l); *(q++)=l;
-               c2nl(b,l); *(q++)=l;
-               c2nl(b,l); *(q++)=l;
-               c2nl(b,l); *(q++)=l; 
-               } 
+               SHA_LONG *q;
+               int i;
+       
+               q=p;
+               for (i=(SHA_LBLOCK/4); i; i--)
+                       {
+                       SHA_LONG l;
+                       c2nl(b,l); *(q++)=l;
+                       c2nl(b,l); *(q++)=l;
+                       c2nl(b,l); *(q++)=l;
+                       c2nl(b,l); *(q++)=l; 
+                       } 
+               sha1_block(c,p,1);
+               }
 #endif
-       sha1_block(c,p,64);
        }
 
 #ifndef SHA1_ASM
-
-void sha1_block(SHA_CTX *c, register unsigned long *W, int num)
+static void sha1_block(SHA_CTX *c, register SHA_LONG *W, int num)
        {
-       register ULONG A,B,C,D,E,T;
-       ULONG X[16];
+       register SHA_LONG A,B,C,D,E,T;
+       SHA_LONG X[SHA_LBLOCK];
 
        A=c->h0;
        B=c->h1;
@@ -394,8 +425,7 @@ void sha1_block(SHA_CTX *c, register unsigned long *W, int num)
        c->h3=(c->h3+B)&0xffffffffL;
        c->h4=(c->h4+C)&0xffffffffL;
 
-       num-=64;
-       if (num <= 0) break;
+       if (--num <= 0) break;
 
        A=c->h0;
        B=c->h1;
@@ -403,7 +433,12 @@ void sha1_block(SHA_CTX *c, register unsigned long *W, int num)
        D=c->h3;
        E=c->h4;
 
-       W+=16;
+       W+=SHA_LBLOCK;  /* Note! This can happen only when sizeof(SHA_LONG)
+                        * is 4. Whenever that is not the case, this
+                        * function is never called with num larger than 1
+                        * and we never advance down here.
+                        *                      <appro@fy.chalmers.se>
+                        */
                }
        }
 #endif
@@ -411,8 +446,8 @@ void sha1_block(SHA_CTX *c, register unsigned long *W, int num)
 void SHA1_Final(unsigned char *md, SHA_CTX *c)
        {
        register int i,j;
-       register ULONG l;
-       register ULONG *p;
+       register SHA_LONG l;
+       register SHA_LONG *p;
        static unsigned char end[4]={0x80,0x00,0x00,0x00};
        unsigned char *cp=end;
 
@@ -432,18 +467,20 @@ void SHA1_Final(unsigned char *md, SHA_CTX *c)
                {
                for (; i<SHA_LBLOCK; i++)
                        p[i]=0;
-               sha1_block(c,p,64);
+               sha1_block(c,p,1);
                i=0;
                }
        for (; i<(SHA_LBLOCK-2); i++)
                p[i]=0;
        p[SHA_LBLOCK-2]=c->Nh;
        p[SHA_LBLOCK-1]=c->Nl;
-#if defined(L_ENDIAN) && defined(SHA1_ASM)
+#if SHA_LONG_LOG2==2
+#if !defined(B_ENDIAN) && defined(SHA1_ASM)
        Endian_Reverse32(p[SHA_LBLOCK-2]);
        Endian_Reverse32(p[SHA_LBLOCK-1]);
 #endif
-       sha1_block(c,p,64);
+#endif
+       sha1_block(c,p,1);
        cp=md;
        l=c->h0; nl2c(l,cp);
        l=c->h1; nl2c(l,cp);
@@ -451,9 +488,11 @@ void SHA1_Final(unsigned char *md, SHA_CTX *c)
        l=c->h3; nl2c(l,cp);
        l=c->h4; nl2c(l,cp);
 
-       /* clear stuff, sha1_block may be leaving some stuff on the stack
-        * but I'm not worried :-) */
        c->num=0;
-/*     memset((char *)&c,0,sizeof(c));*/
+       /* sha1_block may be leaving some stuff on the stack
+        * but I'm not worried :-)
+       memset((void *)c,0,sizeof(SHA_CTX));
+        */
        }
+#endif
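
(Illustrative note, not part of the commit.) For reference, a minimal caller of the interfaces visible in this file: SHA1_Init()/SHA1_Update()/SHA1_Final() and SHA_DIGEST_LENGTH from <openssl/sha.h>. Types follow the prototypes shown above and may differ in later OpenSSL releases.

    #include <stdio.h>
    #include <openssl/sha.h>

    int main(void)
            {
            SHA_CTX c;
            unsigned char md[SHA_DIGEST_LENGTH];
            unsigned char msg[] = "abc";
            int i;

            SHA1_Init(&c);
            SHA1_Update(&c, msg, 3);
            SHA1_Final(md, &c);

            for (i = 0; i < SHA_DIGEST_LENGTH; i++)
                    printf("%02x", md[i]);
            printf("\n");   /* expect a9993e364706816aba3e25717850c26c9cd0d89d */
            return 0;
            }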