Compensate inline assembler in sha512.c for gcc 2.7.2 compiler bug.
[openssl.git] / crypto / sha / sha512.c
index 074ac0ebe951f2d8f9269577432c6fa4686b7eee..2284eefc8f86e95cce42bc373813db807f27e55b 100644 (file)
@@ -1,8 +1,11 @@
 /* crypto/sha/sha512.c */
 /* ====================================================================
- * Copyright (c) 2004 The OpenSSL Project.  All rights reserved.
+ * Copyright (c) 2004 The OpenSSL Project.  All rights reserved
+ * according to the OpenSSL license [found in ../../LICENSE].
  * ====================================================================
  */
+#include <openssl/opensslconf.h>
+#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA512)
 /*
  * IMPLEMENTATION NOTES.
  *
 #include <stdlib.h>
 #include <string.h>
 
-#include <openssl/opensslconf.h>
 #include <openssl/crypto.h>
 #include <openssl/sha.h>
 #include <openssl/opensslv.h>
 
-const char *SHA512_version="SHA-512" OPENSSL_VERSION_PTEXT;
+#include "cryptlib.h"
+
+const char SHA512_version[]="SHA-512" OPENSSL_VERSION_PTEXT;
+
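+/*
+ * On the platforms below the block routine either tolerates misaligned
+ * input or is an assembler module that handles alignment itself, so
+ * SHA512_Update can hand it the caller's buffer directly instead of
+ * copying the data into the aligned context buffer first.
+ */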
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
+    defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || \
+    defined(__s390__) || defined(__s390x__) || \
+    defined(SHA512_ASM)
+#define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+#endif
 
 int SHA384_Init (SHA512_CTX *c)
        {
+#if defined(SHA512_ASM) && (defined(__arm__) || defined(__arm))
+       /* maintain dword order required by assembler module */
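+       /* i.e. each 64-bit h[i] is kept as two 32-bit halves, with the
+        * more significant half at the lower index. */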
+       unsigned int *h = (unsigned int *)c->h;
+
+       h[0]  = 0xcbbb9d5d; h[1]  = 0xc1059ed8;
+       h[2]  = 0x629a292a; h[3]  = 0x367cd507;
+       h[4]  = 0x9159015a; h[5]  = 0x3070dd17;
+       h[6]  = 0x152fecd8; h[7]  = 0xf70e5939;
+       h[8]  = 0x67332667; h[9]  = 0xffc00b31;
+       h[10] = 0x8eb44a87; h[11] = 0x68581511;
+       h[12] = 0xdb0c2e0d; h[13] = 0x64f98fa7;
+       h[14] = 0x47b5481d; h[15] = 0xbefa4fa4;
+#else
        c->h[0]=U64(0xcbbb9d5dc1059ed8);
        c->h[1]=U64(0x629a292a367cd507);
        c->h[2]=U64(0x9159015a3070dd17);
@@ -58,13 +82,27 @@ int SHA384_Init (SHA512_CTX *c)
        c->h[5]=U64(0x8eb44a8768581511);
        c->h[6]=U64(0xdb0c2e0d64f98fa7);
        c->h[7]=U64(0x47b5481dbefa4fa4);
+#endif
         c->Nl=0;        c->Nh=0;
-        c->num=0;
+        c->num=0;       c->md_len=SHA384_DIGEST_LENGTH;
         return 1;
        }
 
 int SHA512_Init (SHA512_CTX *c)
        {
+#if defined(SHA512_ASM) && (defined(__arm__) || defined(__arm))
+       /* maintain dword order required by assembler module */
+       unsigned int *h = (unsigned int *)c->h;
+
+       h[0]  = 0x6a09e667; h[1]  = 0xf3bcc908;
+       h[2]  = 0xbb67ae85; h[3]  = 0x84caa73b;
+       h[4]  = 0x3c6ef372; h[5]  = 0xfe94f82b;
+       h[6]  = 0xa54ff53a; h[7]  = 0x5f1d36f1;
+       h[8]  = 0x510e527f; h[9]  = 0xade682d1;
+       h[10] = 0x9b05688c; h[11] = 0x2b3e6c1f;
+       h[12] = 0x1f83d9ab; h[13] = 0xfb41bd6b;
+       h[14] = 0x5be0cd19; h[15] = 0x137e2179;
+#else
        c->h[0]=U64(0x6a09e667f3bcc908);
        c->h[1]=U64(0xbb67ae8584caa73b);
        c->h[2]=U64(0x3c6ef372fe94f82b);
@@ -73,73 +111,121 @@ int SHA512_Init (SHA512_CTX *c)
        c->h[5]=U64(0x9b05688c2b3e6c1f);
        c->h[6]=U64(0x1f83d9abfb41bd6b);
        c->h[7]=U64(0x5be0cd19137e2179);
+#endif
         c->Nl=0;        c->Nh=0;
-        c->num=0;
+        c->num=0;       c->md_len=SHA512_DIGEST_LENGTH;
         return 1;
        }
 
-static void sha512_block (SHA512_CTX *ctx, const void *in, size_t num);
+#ifndef SHA512_ASM
+static
+#endif
+void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num);
 
-static int sha512_final (unsigned char *md, SHA512_CTX *c, size_t msz)
+int SHA512_Final (unsigned char *md, SHA512_CTX *c)
        {
        unsigned char *p=(unsigned char *)c->u.p;
        size_t n=c->num;
 
-       p[n]=0x80;
+       p[n]=0x80;      /* There is always room for one more byte */
        n++;
        if (n > (sizeof(c->u)-16))
                memset (p+n,0,sizeof(c->u)-n), n=0,
-               sha512_block (c,p,1);
+               sha512_block_data_order (c,p,1);
 
        memset (p+n,0,sizeof(c->u)-16-n);
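+       /* Append the 128-bit message bit length big-endian in the last
+        * 16 bytes of the block: Nh carries the upper, Nl the lower 64 bits. */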
 #ifdef B_ENDIAN
        c->u.d[SHA_LBLOCK-2] = c->Nh;
        c->u.d[SHA_LBLOCK-1] = c->Nl;
 #else
-       p[sizeof(c->u)-1]  = (c->Nl)&0xFF;
-       p[sizeof(c->u)-2]  = (c->Nl>>8)&0xFF;
-       p[sizeof(c->u)-3]  = (c->Nl>>16)&0xFF;
-       p[sizeof(c->u)-4]  = (c->Nl>>24)&0xFF;
-       p[sizeof(c->u)-5]  = (c->Nl>>32)&0xFF;
-       p[sizeof(c->u)-6]  = (c->Nl>>40)&0xFF;
-       p[sizeof(c->u)-7]  = (c->Nl>>48)&0xFF;
-       p[sizeof(c->u)-8]  = (c->Nl>>56)&0xFF;
-       p[sizeof(c->u)-9]  = (c->Nh)&0xFF;
-       p[sizeof(c->u)-10] = (c->Nh>>8)&0xFF;
-       p[sizeof(c->u)-11] = (c->Nh>>16)&0xFF;
-       p[sizeof(c->u)-12] = (c->Nh>>24)&0xFF;
-       p[sizeof(c->u)-13] = (c->Nh>>32)&0xFF;
-       p[sizeof(c->u)-14] = (c->Nh>>40)&0xFF;
-       p[sizeof(c->u)-15] = (c->Nh>>48)&0xFF;
-       p[sizeof(c->u)-16] = (c->Nh>>56)&0xFF;
+       p[sizeof(c->u)-1]  = (unsigned char)(c->Nl);
+       p[sizeof(c->u)-2]  = (unsigned char)(c->Nl>>8);
+       p[sizeof(c->u)-3]  = (unsigned char)(c->Nl>>16);
+       p[sizeof(c->u)-4]  = (unsigned char)(c->Nl>>24);
+       p[sizeof(c->u)-5]  = (unsigned char)(c->Nl>>32);
+       p[sizeof(c->u)-6]  = (unsigned char)(c->Nl>>40);
+       p[sizeof(c->u)-7]  = (unsigned char)(c->Nl>>48);
+       p[sizeof(c->u)-8]  = (unsigned char)(c->Nl>>56);
+       p[sizeof(c->u)-9]  = (unsigned char)(c->Nh);
+       p[sizeof(c->u)-10] = (unsigned char)(c->Nh>>8);
+       p[sizeof(c->u)-11] = (unsigned char)(c->Nh>>16);
+       p[sizeof(c->u)-12] = (unsigned char)(c->Nh>>24);
+       p[sizeof(c->u)-13] = (unsigned char)(c->Nh>>32);
+       p[sizeof(c->u)-14] = (unsigned char)(c->Nh>>40);
+       p[sizeof(c->u)-15] = (unsigned char)(c->Nh>>48);
+       p[sizeof(c->u)-16] = (unsigned char)(c->Nh>>56);
 #endif
 
-       sha512_block (c,p,1);
+       sha512_block_data_order (c,p,1);
 
        if (md==0) return 0;
 
-       for (n=0;msz>0;n++,msz-=8)
+#if defined(SHA512_ASM) && (defined(__arm__) || defined(__arm))
+       /* recall assembler dword order... */
+       n = c->md_len;
+       if (n == SHA384_DIGEST_LENGTH || n == SHA512_DIGEST_LENGTH)
                {
-               SHA_LONG64 t = c->h[n];
+               unsigned int *h = (unsigned int *)c->h, t;
 
-               *(md++) = (t>>56)&0xFF; *(md++) = (t>>48)&0xFF;
-               *(md++) = (t>>40)&0xFF; *(md++) = (t>>32)&0xFF;
-               *(md++) = (t>>24)&0xFF; *(md++) = (t>>16)&0xFF;
-               *(md++) = (t>>8)&0xFF;  *(md++) = (t)&0xFF;
+               for (n/=4;n;n--)
+                       {
+                       t = *(h++);
+                       *(md++) = (unsigned char)(t>>24);
+                       *(md++) = (unsigned char)(t>>16);
+                       *(md++) = (unsigned char)(t>>8);
+                       *(md++) = (unsigned char)(t);
+                       }
                }
-
+       else    return 0;
+#else
+       switch (c->md_len)
+               {
+               /* Let the compiler decide whether it's appropriate to unroll... */
+               case SHA384_DIGEST_LENGTH:
+                       for (n=0;n<SHA384_DIGEST_LENGTH/8;n++)
+                               {
+                               SHA_LONG64 t = c->h[n];
+
+                               *(md++) = (unsigned char)(t>>56);
+                               *(md++) = (unsigned char)(t>>48);
+                               *(md++) = (unsigned char)(t>>40);
+                               *(md++) = (unsigned char)(t>>32);
+                               *(md++) = (unsigned char)(t>>24);
+                               *(md++) = (unsigned char)(t>>16);
+                               *(md++) = (unsigned char)(t>>8);
+                               *(md++) = (unsigned char)(t);
+                               }
+                       break;
+               case SHA512_DIGEST_LENGTH:
+                       for (n=0;n<SHA512_DIGEST_LENGTH/8;n++)
+                               {
+                               SHA_LONG64 t = c->h[n];
+
+                               *(md++) = (unsigned char)(t>>56);
+                               *(md++) = (unsigned char)(t>>48);
+                               *(md++) = (unsigned char)(t>>40);
+                               *(md++) = (unsigned char)(t>>32);
+                               *(md++) = (unsigned char)(t>>24);
+                               *(md++) = (unsigned char)(t>>16);
+                               *(md++) = (unsigned char)(t>>8);
+                               *(md++) = (unsigned char)(t);
+                               }
+                       break;
+               /* ... as well as make sure md_len is not abused. */
+               default:        return 0;
+               }
+#endif
        return 1;
        }
 
 int SHA384_Final (unsigned char *md,SHA512_CTX *c)
-{   return sha512_final (md,c,SHA384_DIGEST_LENGTH);   }
-int SHA512_Final (unsigned char *md,SHA512_CTX *c)
-{   return sha512_final (md,c,SHA512_DIGEST_LENGTH);   }
+{   return SHA512_Final (md,c);   }
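+/*
+ * SHA384_Final can forward to SHA512_Final because SHA384_Init records
+ * md_len = SHA384_DIGEST_LENGTH in the context, so only 48 bytes are
+ * emitted.  A typical streaming use of the shared context:
+ *
+ *     SHA512_CTX c;
+ *     unsigned char out[SHA384_DIGEST_LENGTH];
+ *
+ *     SHA384_Init(&c);
+ *     SHA512_Update(&c,"abc",3);
+ *     SHA384_Final(out,&c);
+ */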
 
 int SHA512_Update (SHA512_CTX *c, const void *_data, size_t len)
        {
        SHA_LONG64      l;
-       unsigned char  *p=c->u.p,*data=(unsigned char *)_data;
+       unsigned char  *p=c->u.p;
+       const unsigned char *data=(const unsigned char *)_data;
 
        if (len==0) return  1;
 
@@ -160,22 +246,22 @@ int SHA512_Update (SHA512_CTX *c, const void *_data, size_t len)
                else    {
                        memcpy (p+c->num,data,n), c->num = 0;
                        len-=n, data+=n;
-                       sha512_block (c,p,1);
+                       sha512_block_data_order (c,p,1);
                        }
                }
 
        if (len >= sizeof(c->u))
                {
 #ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
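+               /* If the input is misaligned and the block routine cannot
+                * cope with that, bounce it through the aligned context
+                * buffer one block at a time. */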
-               if ((int)data%sizeof(c->u.d[0]) != 0)
+               if ((size_t)data%sizeof(c->u.d[0]) != 0)
                        while (len >= sizeof(c->u))
                                memcpy (p,data,sizeof(c->u)),
-                               sha512_block (c,p,1),
+                               sha512_block_data_order (c,p,1),
                                len  -= sizeof(c->u),
                                data += sizeof(c->u);
                else
 #endif
-                       sha512_block (c,data,len/sizeof(c->u)),
+                       sha512_block_data_order (c,data,len/sizeof(c->u)),
                        data += len,
                        len  %= sizeof(c->u),
                        data -= len;
@@ -190,7 +276,7 @@ int SHA384_Update (SHA512_CTX *c, const void *data, size_t len)
 {   return SHA512_Update (c,data,len);   }
 
 void SHA512_Transform (SHA512_CTX *c, const unsigned char *data)
-{   sha512_block (c,data,1);  }
+{   sha512_block_data_order (c,data,1);  }
 
 unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md)
        {
@@ -200,7 +286,7 @@ unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md)
        if (md == NULL) md=m;
        SHA384_Init(&c);
        SHA512_Update(&c,d,n);
-       sha512_final(md,&c,sizeof(m));
+       SHA512_Final(md,&c);
        OPENSSL_cleanse(&c,sizeof(c));
        return(md);
        }
@@ -213,11 +299,12 @@ unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md)
        if (md == NULL) md=m;
        SHA512_Init(&c);
        SHA512_Update(&c,d,n);
-       sha512_final(md,&c,sizeof(m));
+       SHA512_Final(md,&c);
        OPENSSL_cleanse(&c,sizeof(c));
        return(md);
        }
 
+#ifndef SHA512_ASM
 static const SHA_LONG64 K512[80] = {
         U64(0x428a2f98d728ae22),U64(0x7137449123ef65cd),
         U64(0xb5c0fbcfec4d3b2f),U64(0xe9b5dba58189dbbc),
@@ -260,10 +347,85 @@ static const SHA_LONG64 K512[80] = {
         U64(0x4cc5d4becb3e42b6),U64(0x597f299cfc657e2a),
         U64(0x5fcb6fab3ad6faec),U64(0x6c44198c4a475817) };
 
-#define B(x,j)    (((SHA_LONG64)(*(((unsigned char *)(&x))+j)))<<((7-j)*8))
+#ifndef PEDANTIC
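+/*
+ * Compiler- and platform-specific fast paths for the 64-bit rotate (ROTR)
+ * and the big-endian 64-bit load (PULL64) used by the message schedule.
+ */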
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#  if defined(__x86_64) || defined(__x86_64__)
+#   define ROTR(a,n)   ({ unsigned long ret;           \
+                               asm ("rorq %1,%0"       \
+                               : "=r"(ret)             \
+                               : "J"(n),"0"(a)         \
+                               : "cc"); ret;           })
+#   if !defined(B_ENDIAN)
+#    define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
+                               asm ("bswapq    %0"             \
+                               : "=r"(ret)                     \
+                               : "0"(ret)); ret;               })
+#   endif
+#  elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
+#   if defined(I386_ONLY)
+#    define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+                        unsigned int hi=p[0],lo=p[1];          \
+                               asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
+                                   "roll $16,%%eax; roll $16,%%edx; "\
+                                   "xchgb %%ah,%%al;xchgb %%dh,%%dl;" \
+                               : "=a"(lo),"=d"(hi)             \
+                               : "0"(lo),"1"(hi) : "cc");      \
+                               ((SHA_LONG64)hi)<<32|lo;        })
+#   else
+#    define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+                        unsigned int hi=p[0],lo=p[1];          \
+                               asm ("bswapl %0; bswapl %1;"    \
+                               : "=r"(lo),"=r"(hi)             \
+                               : "0"(lo),"1"(hi));             \
+                               ((SHA_LONG64)hi)<<32|lo;        })
+#   endif
+#  elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
+#   define ROTR(a,n)   ({ unsigned long ret;           \
+                               asm ("rotrdi %0,%1,%2"  \
+                               : "=r"(ret)             \
+                               : "r"(a),"K"(n)); ret;  })
+#  endif
+# elif defined(_MSC_VER)
+#  if defined(_WIN64)  /* applies to both IA-64 and AMD64 */
+#   define ROTR(a,n)   _rotr64((a),n)
+#  endif
+#  if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
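+    /* __fastcall passes the pointer in ECX and a 64-bit return value is
+     * expected in EDX:EAX, which is exactly where the _asm blocks below
+     * leave it, so no explicit return statement is needed. */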
+#   if defined(I386_ONLY)
+    static SHA_LONG64 __fastcall __pull64be(const void *x)
+    {  _asm    mov     edx, [ecx + 0]
+       _asm    mov     eax, [ecx + 4]
+       _asm    xchg    dh,dl
+       _asm    xchg    ah,al
+       _asm    rol     edx,16
+       _asm    rol     eax,16
+       _asm    xchg    dh,dl
+       _asm    xchg    ah,al
+    }
+#   else
+    static SHA_LONG64 __fastcall __pull64be(const void *x)
+    {  _asm    mov     edx, [ecx + 0]
+       _asm    mov     eax, [ecx + 4]
+       _asm    bswap   edx
+       _asm    bswap   eax
+    }
+#   endif
+#   define PULL64(x) __pull64be(&(x))
+#   if _MSC_VER<=1200
+#    pragma inline_depth(0)
+#   endif
+#  endif
+# endif
+#endif
+
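+/* Portable fallbacks: a byte-by-byte big-endian load and a plain
+ * shift-and-or rotate, used when no inline-assembler definition was
+ * selected above. */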
+#ifndef PULL64
+#define B(x,j)    (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8))
 #define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7))
+#endif
 
+#ifndef ROTR
 #define ROTR(x,s)      (((x)>>s) | (x)<<(64-s))
+#endif
+
 #define Sigma0(x)      (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
 #define Sigma1(x)      (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
 #define sigma0(x)      (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
@@ -272,9 +434,68 @@ static const SHA_LONG64 K512[80] = {
 #define Ch(x,y,z)      (((x) & (y)) ^ ((~(x)) & (z)))
 #define Maj(x,y,z)     (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
 
-#ifdef OPENSSL_SMALL_FOOTPRINT
 
-static void sha512_block (SHA512_CTX *ctx, const void *in, size_t num)
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+/*
+ * This code should give better results on a 32-bit CPU with fewer than
+ * ~24 registers, both size- and performance-wise...
+ */
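+/*
+ * The working variables b..h and the last 16 message-schedule words live
+ * in one array addressed through F, which is decremented once per round:
+ * writing the new a, e and W[t] to F[0], F[4] and F[8] implicitly "ages"
+ * the older values into b..h and the previous schedule words without any
+ * copying.
+ */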
+static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
+       {
+       const SHA_LONG64 *W=in;
+       SHA_LONG64      A,E,T;
+       SHA_LONG64      X[9+80],*F;
+       int i;
+
+                       while (num--) {
+
+       F    = X+80;
+       A    = ctx->h[0];       F[1] = ctx->h[1];
+       F[2] = ctx->h[2];       F[3] = ctx->h[3];
+       E    = ctx->h[4];       F[5] = ctx->h[5];
+       F[6] = ctx->h[6];       F[7] = ctx->h[7];
+
+       for (i=0;i<16;i++,F--)
+               {
+#ifdef B_ENDIAN
+               T = W[i];
+#else
+               T = PULL64(W[i]);
+#endif
+               F[0] = A;
+               F[4] = E;
+               F[8] = T;
+               T   += F[7] + Sigma1(E) + Ch(E,F[5],F[6]) + K512[i];
+               E    = F[3] + T;
+               A    = T + Sigma0(A) + Maj(A,F[1],F[2]);
+               }
+
+       for (;i<80;i++,F--)
+               {
+               T    = sigma0(F[8+16-1]);
+               T   += sigma1(F[8+16-14]);
+               T   += F[8+16] + F[8+16-9];
+
+               F[0] = A;
+               F[4] = E;
+               F[8] = T;
+               T   += F[7] + Sigma1(E) + Ch(E,F[5],F[6]) + K512[i];
+               E    = F[3] + T;
+               A    = T + Sigma0(A) + Maj(A,F[1],F[2]);
+               }
+
+       ctx->h[0] += A;         ctx->h[1] += F[1];
+       ctx->h[2] += F[2];      ctx->h[3] += F[3];
+       ctx->h[4] += E;         ctx->h[5] += F[5];
+       ctx->h[6] += F[6];      ctx->h[7] += F[7];
+
+                       W+=SHA_LBLOCK;
+                       }
+       }
+
+#elif defined(OPENSSL_SMALL_FOOTPRINT)
+
+static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
        {
        const SHA_LONG64 *W=in;
        SHA_LONG64      a,b,c,d,e,f,g,h,s0,s1,T1,T2;
@@ -314,6 +535,7 @@ static void sha512_block (SHA512_CTX *ctx, const void *in, size_t num)
        ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
        ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
 
+                       W+=SHA_LBLOCK;
                        }
        }
 
@@ -324,13 +546,13 @@ static void sha512_block (SHA512_CTX *ctx, const void *in, size_t num)
        h = Sigma0(a) + Maj(a,b,c);                     \
        d += T1;        h += T1;                } while (0)
 
-#define        ROUND_16_80(i,a,b,c,d,e,f,g,h,X)        do {    \
-       s0 = X[(i+1)&0x0f];     s0 = sigma0(s0);        \
-       s1 = X[(i+14)&0x0f];    s1 = sigma1(s1);        \
-       T1 = X[i&0x0f] += s0 + s1 + X[(i+9)&0x0f];      \
-       ROUND_00_15(i,a,b,c,d,e,f,g,h);         } while (0)
+#define        ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X)      do {    \
+       s0 = X[(j+1)&0x0f];     s0 = sigma0(s0);        \
+       s1 = X[(j+14)&0x0f];    s1 = sigma1(s1);        \
+       T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f];    \
+       ROUND_00_15(i+j,a,b,c,d,e,f,g,h);               } while (0)
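+/* The round counter is split into i (a multiple of 16) and j (0..15) so
+ * that the X[] subscripts above are compile-time constants in the 16-way
+ * unrolled loop below. */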
 
-static void sha512_block (SHA512_CTX *ctx, const void *in, size_t num)
+static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
        {
        const SHA_LONG64 *W=in;
        SHA_LONG64      a,b,c,d,e,f,g,h,s0,s1,T1;
@@ -378,22 +600,35 @@ static void sha512_block (SHA512_CTX *ctx, const void *in, size_t num)
        T1 = X[15] = PULL64(W[15]);     ROUND_00_15(15,b,c,d,e,f,g,h,a);
 #endif
 
-       for (i=16;i<80;i+=8)
+       for (i=16;i<80;i+=16)
                {
-               ROUND_16_80(i+0,a,b,c,d,e,f,g,h,X);
-               ROUND_16_80(i+1,h,a,b,c,d,e,f,g,X);
-               ROUND_16_80(i+2,g,h,a,b,c,d,e,f,X);
-               ROUND_16_80(i+3,f,g,h,a,b,c,d,e,X);
-               ROUND_16_80(i+4,e,f,g,h,a,b,c,d,X);
-               ROUND_16_80(i+5,d,e,f,g,h,a,b,c,X);
-               ROUND_16_80(i+6,c,d,e,f,g,h,a,b,X);
-               ROUND_16_80(i+7,b,c,d,e,f,g,h,a,X);
+               ROUND_16_80(i, 0,a,b,c,d,e,f,g,h,X);
+               ROUND_16_80(i, 1,h,a,b,c,d,e,f,g,X);
+               ROUND_16_80(i, 2,g,h,a,b,c,d,e,f,X);
+               ROUND_16_80(i, 3,f,g,h,a,b,c,d,e,X);
+               ROUND_16_80(i, 4,e,f,g,h,a,b,c,d,X);
+               ROUND_16_80(i, 5,d,e,f,g,h,a,b,c,X);
+               ROUND_16_80(i, 6,c,d,e,f,g,h,a,b,X);
+               ROUND_16_80(i, 7,b,c,d,e,f,g,h,a,X);
+               ROUND_16_80(i, 8,a,b,c,d,e,f,g,h,X);
+               ROUND_16_80(i, 9,h,a,b,c,d,e,f,g,X);
+               ROUND_16_80(i,10,g,h,a,b,c,d,e,f,X);
+               ROUND_16_80(i,11,f,g,h,a,b,c,d,e,X);
+               ROUND_16_80(i,12,e,f,g,h,a,b,c,d,X);
+               ROUND_16_80(i,13,d,e,f,g,h,a,b,c,X);
+               ROUND_16_80(i,14,c,d,e,f,g,h,a,b,X);
+               ROUND_16_80(i,15,b,c,d,e,f,g,h,a,X);
                }
 
        ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
        ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
 
+                       W+=SHA_LBLOCK;
                        }
        }
 
 #endif
+
+#endif /* SHA512_ASM */
+
+#endif /* OPENSSL_NO_SHA512 */