Temporary workaround for IRIX64 build.
[openssl.git] crypto/sha/sha1dgst.c
index 18a492d0e6aaeae13a64ce086a95fb5595b9c880..af8d9aed8aa8bd6b1dca77aaa2817c0536897f11 100644
@@ -64,6 +64,7 @@
 #include "sha_locl.h"
 #include <openssl/opensslv.h>
 
+#ifndef NO_SHA1
 char *SHA1_version="SHA1" OPENSSL_VERSION_PTEXT;
 
 /* Implemented from SHA-1 document - The Secure Hash Algorithm
@@ -80,14 +81,14 @@ char *SHA1_version="SHA1" OPENSSL_VERSION_PTEXT;
 #define K_40_59 0x8f1bbcdcUL
 #define K_60_79 0xca62c1d6UL
 
-#  ifdef SHA1_ASM
-     void sha1_block_x86(SHA_CTX *c, register SHA_LONG *p, int num);
-#    define sha1_block sha1_block_x86
-#  else
-     void sha1_block(SHA_CTX *c, register SHA_LONG *p, int num);
-#  endif
+#ifdef SHA1_ASM
+   void sha1_block_x86(SHA_CTX *c, register SHA_LONG *p, int num);
+#  define sha1_block(c,p,n) sha1_block_x86((c),(p),(n)*SHA_CBLOCK)
+#else
+   static void sha1_block(SHA_CTX *c, register SHA_LONG *p, int num);
+#endif
 
-#if defined(L_ENDIAN) && defined(SHA1_ASM)
+#if !defined(B_ENDIAN) && defined(SHA1_ASM)
 #  define      M_c2nl          c2l
 #  define      M_p_c2nl        p_c2l
 #  define      M_c2nl_p        c2l_p
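
For illustration, not part of the patch: the third argument of sha1_block now counts SHA_CBLOCK-byte blocks instead of bytes, which is why every call below changes from 64 to 1, and the SHA1_ASM macro multiplies back to a byte count because the x86 assembler routine keeps the old convention. A minimal sketch of the idea, with hypothetical my_* stand-ins for the real routines:

    /* Sketch only; my_sha1_block_c() and my_sha1_block_x86() are
     * hypothetical stand-ins for the routines in this file. */
    #define SHA_CBLOCK 64   /* bytes per SHA-1 input block */

    void my_sha1_block_c(void *c, const unsigned char *p, int num);   /* num = blocks */
    void my_sha1_block_x86(void *c, const unsigned char *p, int num); /* num = bytes  */

    #ifdef SHA1_ASM
    #  define my_sha1_block(c,p,n) my_sha1_block_x86((c),(p),(n)*SHA_CBLOCK)
    #else
    #  define my_sha1_block(c,p,n) my_sha1_block_c((c),(p),(n))
    #endif

    /* What used to be written sha1_block(c,p,64) for a single block is
     * now my_sha1_block(c,p,1) under either definition. */
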
@@ -113,7 +114,7 @@ void SHA1_Init(SHA_CTX *c)
        c->num=0;
        }
 
-void SHA1_Update(SHA_CTX *c, register unsigned char *data,
+void SHA1_Update(SHA_CTX *c, const register unsigned char *data,
             unsigned long len)
        {
        register SHA_LONG *p;
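
For illustration, not part of the patch: the const added to the data parameter above does not change how the function is called; a typical user of the public API looks the same, assuming the matching prototype in <openssl/sha.h> was updated as well.

    #include <string.h>
    #include <openssl/sha.h>

    static void digest_example(const char *msg, unsigned char md[SHA_DIGEST_LENGTH])
            {
            SHA_CTX ctx;

            SHA1_Init(&ctx);
            /* data may now point at read-only storage without discarding const */
            SHA1_Update(&ctx, (const unsigned char *)msg, (unsigned long)strlen(msg));
            SHA1_Final(md, &ctx);
            }
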
@@ -146,7 +147,7 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
                                }
                        len-=(SHA_CBLOCK-c->num);
 
-                       sha1_block(c,p,64);
+                       sha1_block(c,p,1);
                        c->num=0;
                        /* drop through and do the rest */
                        }
@@ -183,15 +184,15 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
         * copies it to a local array.  I should be able to do this for
         * the C version as well....
         */
-#if 1
+#if SHA_LONG_LOG2==2
 #if defined(B_ENDIAN) || defined(SHA1_ASM)
        if ((((unsigned long)data)%sizeof(SHA_LONG)) == 0)
                {
                sw=len/SHA_CBLOCK;
                if (sw)
                        {
-                       sw*=SHA_CBLOCK;
                        sha1_block(c,(SHA_LONG *)data,sw);
+                       sw*=SHA_CBLOCK;
                        data+=sw;
                        len-=sw;
                        }
@@ -203,35 +204,61 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
        p=c->data;
        while (len >= SHA_CBLOCK)
                {
-#if defined(B_ENDIAN) || defined(L_ENDIAN)
+#if SHA_LONG_LOG2==2
+#if defined(B_ENDIAN) || defined(SHA1_ASM)
+#define SHA_NO_TAIL_CODE
+               /*
+                * Basically we get here only when data happens
+                * to be unaligned.
+                */
                if (p != (SHA_LONG *)data)
                        memcpy(p,data,SHA_CBLOCK);
                data+=SHA_CBLOCK;
-#  ifdef L_ENDIAN
-#    ifndef SHA1_ASM /* Will not happen */
-               for (sw=(SHA_LBLOCK/4); sw; sw--)
+               sha1_block(c,p=c->data,1);
+               len-=SHA_CBLOCK;
+#elif defined(L_ENDIAN)
+#define BE_COPY(dst,src,i)     {                               \
+                               l = ((SHA_LONG *)src)[i];       \
+                               Endian_Reverse32(l);            \
+                               dst[i] = l;                     \
+                               }
+               if ((((unsigned long)data)%sizeof(SHA_LONG)) == 0)
                        {
-                       Endian_Reverse32(p[0]);
-                       Endian_Reverse32(p[1]);
-                       Endian_Reverse32(p[2]);
-                       Endian_Reverse32(p[3]);
-                       p+=4;
+                       for (sw=(SHA_LBLOCK/4); sw; sw--)
+                               {
+                               BE_COPY(p,data,0);
+                               BE_COPY(p,data,1);
+                               BE_COPY(p,data,2);
+                               BE_COPY(p,data,3);
+                               p+=4;
+                               data += 4*sizeof(SHA_LONG);
+                               }
+                       sha1_block(c,p=c->data,1);
+                       len-=SHA_CBLOCK;
+                       continue;
                        }
+#endif
+#endif
+#ifndef SHA_NO_TAIL_CODE
+               /*
+                * In addition to "sizeof(SHA_LONG)!= 4" case the
+                * following code covers unaligned access cases on
+                * little-endian machines.
+                *                      <appro@fy.chalmers.se>
+                */
                p=c->data;
-#    endif
-#  endif
-#else
-               for (sw=(SHA_BLOCK/4); sw; sw--)
+               for (sw=(SHA_LBLOCK/4); sw; sw--)
                        {
-                       M_c2nl(data,l); *(p++)=l;
-                       M_c2nl(data,l); *(p++)=l;
-                       M_c2nl(data,l); *(p++)=l;
-                       M_c2nl(data,l); *(p++)=l;
+                       M_c2nl(data,l); p[0]=l;
+                       M_c2nl(data,l); p[1]=l;
+                       M_c2nl(data,l); p[2]=l;
+                       M_c2nl(data,l); p[3]=l;
+                       p+=4;
                        }
                p=c->data;
-#endif
-               sha1_block(c,p,64);
+               sha1_block(c,p,1);
                len-=SHA_CBLOCK;
+#endif
                }
        ec=(int)len;
        c->num=ec;
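
For illustration, not part of the patch: BE_COPY loads one aligned SHA_LONG and byte-swaps it with Endian_Reverse32, so on a little-endian machine the word ends up holding the big-endian value SHA-1 operates on, while the M_c2nl tail code assembles the word byte by byte and therefore tolerates any alignment. A self-contained sketch of the two strategies in plain C, standing in for the sha_locl.h macros:

    #include <stdint.h>

    /* Portable stand-in for Endian_Reverse32: swap the four bytes of a word. */
    static uint32_t swap32(uint32_t x)
            {
            return (x >> 24) | ((x >> 8) & 0x0000ff00UL) |
                   ((x << 8) & 0x00ff0000UL) | (x << 24);
            }

    /* Aligned fast path (the BE_COPY idea): load a whole word, then swap.
     * Only valid when p is suitably aligned and the host is little-endian. */
    static uint32_t load_be32_aligned(const uint32_t *p)
            {
            return swap32(*p);
            }

    /* Tail code (the c2nl idea): build the word a byte at a time, most
     * significant byte first; works for any alignment and any byte order. */
    static uint32_t load_be32_bytes(const unsigned char *p)
            {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
            }
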
@@ -246,45 +273,59 @@ void SHA1_Update(SHA_CTX *c, register unsigned char *data,
 
 void SHA1_Transform(SHA_CTX *c, unsigned char *b)
        {
-       SHA_LONG p[16];
-#ifndef B_ENDIAN
-       SHA_LONG *q;
-       int i;
-#endif
+       SHA_LONG p[SHA_LBLOCK];
 
-#if defined(B_ENDIAN) || defined(L_ENDIAN)
-       memcpy(p,b,64);
-#ifdef L_ENDIAN
-       q=p;
-       for (i=(SHA_LBLOCK/4); i; i--)
+#if SHA_LONG_LOG2==2
+#if defined(B_ENDIAN) || defined(SHA1_ASM)
+       memcpy(p,b,SHA_CBLOCK);
+       sha1_block(c,p,1);
+       return;
+#elif defined(L_ENDIAN)
+       if (((unsigned long)b%sizeof(SHA_LONG)) == 0)
                {
-               Endian_Reverse32(q[0]);
-               Endian_Reverse32(q[1]);
-               Endian_Reverse32(q[2]);
-               Endian_Reverse32(q[3]);
-               q+=4;
+               SHA_LONG *q;
+               int i;
+
+               q=p;
+               for (i=(SHA_LBLOCK/4); i; i--)
+                       {
+                       unsigned long l;
+                       BE_COPY(q,b,0); /* BE_COPY was defined above */
+                       BE_COPY(q,b,1);
+                       BE_COPY(q,b,2);
+                       BE_COPY(q,b,3);
+                       q+=4;
+                       b+=4*sizeof(SHA_LONG);
+                       }
+               sha1_block(c,p,1);
+               return;
                }
 #endif
-#else
-       q=p;
-       for (i=(SHA_LBLOCK/4); i; i--)
+#endif
+#ifndef SHA_NO_TAIL_CODE /* defined above, see comment */
                {
-               SHA_LONG l;
-               c2nl(b,l); *(q++)=l;
-               c2nl(b,l); *(q++)=l;
-               c2nl(b,l); *(q++)=l;
-               c2nl(b,l); *(q++)=l; 
-               } 
+               SHA_LONG *q;
+               int i;
+       
+               q=p;
+               for (i=(SHA_LBLOCK/4); i; i--)
+                       {
+                       SHA_LONG l;
+                       c2nl(b,l); *(q++)=l;
+                       c2nl(b,l); *(q++)=l;
+                       c2nl(b,l); *(q++)=l;
+                       c2nl(b,l); *(q++)=l; 
+                       } 
+               sha1_block(c,p,1);
+               }
 #endif
-       sha1_block(c,p,64);
        }
 
 #ifndef SHA1_ASM
-
-void sha1_block(SHA_CTX *c, register SHA_LONG *W, int num)
+static void sha1_block(SHA_CTX *c, register SHA_LONG *W, int num)
        {
        register SHA_LONG A,B,C,D,E,T;
-       SHA_LONG X[16];
+       SHA_LONG X[SHA_LBLOCK];
 
        A=c->h0;
        B=c->h1;
@@ -384,8 +425,7 @@ void sha1_block(SHA_CTX *c, register SHA_LONG *W, int num)
        c->h3=(c->h3+B)&0xffffffffL;
        c->h4=(c->h4+C)&0xffffffffL;
 
-       num-=64;
-       if (num <= 0) break;
+       if (--num <= 0) break;
 
        A=c->h0;
        B=c->h1;
@@ -393,7 +433,12 @@ void sha1_block(SHA_CTX *c, register SHA_LONG *W, int num)
        D=c->h3;
        E=c->h4;
 
-       W+=16;
+       W+=SHA_LBLOCK;  /* Note! This can happen only when sizeof(SHA_LONG)
+                        * is 4. Whenever it's not the actual case this
+                        * function is never called with num larger than 1
+                        * and we never advance down here.
+                        *                      <appro@fy.chalmers.se>
+                        */
                }
        }
 #endif
@@ -422,18 +467,20 @@ void SHA1_Final(unsigned char *md, SHA_CTX *c)
                {
                for (; i<SHA_LBLOCK; i++)
                        p[i]=0;
-               sha1_block(c,p,64);
+               sha1_block(c,p,1);
                i=0;
                }
        for (; i<(SHA_LBLOCK-2); i++)
                p[i]=0;
        p[SHA_LBLOCK-2]=c->Nh;
        p[SHA_LBLOCK-1]=c->Nl;
-#if defined(L_ENDIAN) && defined(SHA1_ASM)
+#if SHA_LONG_LOG2==2
+#if !defined(B_ENDIAN) && defined(SHA1_ASM)
        Endian_Reverse32(p[SHA_LBLOCK-2]);
        Endian_Reverse32(p[SHA_LBLOCK-1]);
 #endif
-       sha1_block(c,p,64);
+#endif
+       sha1_block(c,p,1);
        cp=md;
        l=c->h0; nl2c(l,cp);
        l=c->h1; nl2c(l,cp);
@@ -441,9 +488,11 @@ void SHA1_Final(unsigned char *md, SHA_CTX *c)
        l=c->h3; nl2c(l,cp);
        l=c->h4; nl2c(l,cp);
 
-       /* clear stuff, sha1_block may be leaving some stuff on the stack
-        * but I'm not worried :-) */
        c->num=0;
-/*     memset((char *)&c,0,sizeof(c));*/
+       /* sha_block may be leaving some stuff on the stack
+        * but I'm not worried :-)
+       memset((void *)c,0,sizeof(SHA_CTX));
+        */
        }
+#endif
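
For illustration, not part of the patch: in SHA1_Final above, c->Nh and c->Nl are the high and low 32 bits of the total message length in bits, which FIPS 180-1 places in the last two words of the padded final block. The extra Endian_Reverse32 in the !B_ENDIAN && SHA1_ASM case presumably compensates for the assembler routine byte-swapping every input word itself. A minimal sketch of that layout, with a hypothetical helper name:

    #include <stdint.h>

    /* Sketch of the final-block layout only; place_length_words() is a
     * hypothetical helper, not code from this file.  bits_hi:bits_lo is
     * the 64-bit message length in bits. */
    static void place_length_words(uint32_t block[16], uint32_t bits_hi, uint32_t bits_lo)
            {
            block[14] = bits_hi;        /* p[SHA_LBLOCK-2] = c->Nh above */
            block[15] = bits_lo;        /* p[SHA_LBLOCK-1] = c->Nl above */
            /* On the SHA1_ASM path every word of the block is byte-swapped
             * inside the assembler, so these two host-order words are
             * pre-swapped in the patch to come out right after that swap. */
            }
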