X-Git-Url: https://git.openssl.org/gitweb/?a=blobdiff_plain;f=crypto%2Fsha%2Fsha512.c;h=e94de4370b61c93801214ea54ec68b2c5ffdd97c;hb=0d7903f83f84bba1d29225efd999c633a0c5ba01;hp=ebae411f137d0745b4c1e894095e9d598b43f511;hpb=b39fc560612984e65ec30d7f37487303bf514fb3;p=openssl.git

diff --git a/crypto/sha/sha512.c b/crypto/sha/sha512.c
index ebae411f13..e94de4370b 100644
--- a/crypto/sha/sha512.c
+++ b/crypto/sha/sha512.c
@@ -1,9 +1,12 @@
-/* crypto/sha/sha512.c */
-/* ====================================================================
- * Copyright (c) 2004 The OpenSSL Project. All rights reserved
- * according to the OpenSSL license [found in ../../LICENSE].
- * ====================================================================
+/*
+ * Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
  */
+
 #include 
 /*-
  * IMPLEMENTATION NOTES.
@@ -49,8 +52,6 @@
 #include "internal/cryptlib.h"
 
-const char SHA512_version[] = "SHA-512" OPENSSL_VERSION_PTEXT;
-
 #if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
     defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || \
     defined(__s390__) || defined(__s390x__) || \
@@ -142,7 +143,7 @@ int SHA512_Final(unsigned char *md, SHA512_CTX *c)
         return 0;
 
     switch (c->md_len) {
-    /* Let compiler decide if it's appropriate to unroll... */
+        /* Let compiler decide if it's appropriate to unroll... */
     case SHA384_DIGEST_LENGTH:
         for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) {
             SHA_LONG64 t = c->h[n];
@@ -171,7 +172,7 @@ int SHA512_Final(unsigned char *md, SHA512_CTX *c)
             *(md++) = (unsigned char)(t);
         }
         break;
-    /* ... as well as make sure md_len is not abused. */
+        /* ... as well as make sure md_len is not abused. */
     default:
         return 0;
     }
@@ -218,12 +219,12 @@ int SHA512_Update(SHA512_CTX *c, const void *_data, size_t len)
         if ((size_t)data % sizeof(c->u.d[0]) != 0)
             while (len >= sizeof(c->u))
                 memcpy(p, data, sizeof(c->u)),
-                sha512_block_data_order(c, p, 1),
-                len -= sizeof(c->u), data += sizeof(c->u);
+                    sha512_block_data_order(c, p, 1),
+                    len -= sizeof(c->u), data += sizeof(c->u);
         else
 #endif
             sha512_block_data_order(c, data, len / sizeof(c->u)),
-            data += len, len %= sizeof(c->u), data -= len;
+                data += len, len %= sizeof(c->u), data -= len;
     }
 
     if (len != 0)
@@ -319,9 +320,10 @@ static const SHA_LONG64 K512[80] = {
 };
 
 # ifndef PEDANTIC
-#  if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#  if defined(__GNUC__) && __GNUC__>=2 && \
+      !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
 #   if defined(__x86_64) || defined(__x86_64__)
-#    define ROTR(a,n)    ({ SHA_LONG64 ret;     \
+#    define ROTR(a,n)    ({ SHA_LONG64 ret;             \
                                 asm ("rorq %1,%0"       \
                                 : "=r"(ret)             \
                                 : "J"(n),"0"(a)         \
@@ -335,37 +337,37 @@ static const SHA_LONG64 K512[80] = {
 #   elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
 #    if defined(I386_ONLY)
 #     define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
-                         unsigned int hi=p[0],lo=p[1];  \
+                          unsigned int hi=p[0],lo=p[1];         \
                           asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
                               "roll $16,%%eax; roll $16,%%edx; "\
-                              "xchgb %%ah,%%al;xchgb %%dh,%%dl;" \
+                              "xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
                               : "=a"(lo),"=d"(hi)               \
                               : "0"(lo),"1"(hi) : "cc");        \
                               ((SHA_LONG64)hi)<<32|lo;  })
 #    else
 #     define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
-                         unsigned int hi=p[0],lo=p[1];  \
+                          unsigned int hi=p[0],lo=p[1];         \
                           asm ("bswapl %0; bswapl %1;"          \
                               : "=r"(lo),"=r"(hi)               \
                               : "0"(lo),"1"(hi));               \
                               ((SHA_LONG64)hi)<<32|lo;  })
 #    endif
 #   elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
-#    define ROTR(a,n)    ({ SHA_LONG64 ret;     \
+#    define ROTR(a,n)    ({ SHA_LONG64 ret;             \
                                 asm ("rotrdi %0,%1,%2"  \
                                 : "=r"(ret)             \
                                 : "r"(a),"K"(n)); ret;  })
 #   elif defined(__aarch64__)
-#    define ROTR(a,n)    ({ SHA_LONG64 ret;     \
+#    define ROTR(a,n)    ({ SHA_LONG64 ret;             \
                                 asm ("ror %0,%1,%2"     \
                                 : "=r"(ret)             \
                                 : "r"(a),"I"(n)); ret;  })
 #    if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
        __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
-#     define PULL64(x)   ({ SHA_LONG64 ret;     \
+#     define PULL64(x)   ({ SHA_LONG64 ret;                     \
                                 asm ("rev %0,%1"        \
                                 : "=r"(ret)             \
-                                : "r"(*((const SHA_LONG64 *)(&(x))))); ret; })
+                                : "r"(*((const SHA_LONG64 *)(&(x))))); ret;     })
 #    endif
 #   endif
 #  elif defined(_MSC_VER)
@@ -373,21 +375,28 @@ static const SHA_LONG64 K512[80] = {
 #   pragma intrinsic(_rotr64)
 #   define ROTR(a,n)    _rotr64((a),n)
 #  endif
-#  if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#  if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && \
+      !defined(OPENSSL_NO_INLINE_ASM)
 #   if defined(I386_ONLY)
 static SHA_LONG64 __fastcall __pull64be(const void *x)
 {
-    _asm mov edx,[ecx + 0]
-    _asm mov eax,[ecx + 4]
-_asm xchg dh, dl
-    _asm xchg ah, al
-    _asm rol edx, 16 _asm rol eax, 16 _asm xchg dh, dl _asm xchg ah, al}
+    _asm mov edx,[ecx + 0]
+    _asm mov eax,[ecx + 4]
+    _asm xchg dh, dl
+    _asm xchg ah, al
+    _asm rol edx, 16
+    _asm rol eax, 16
+    _asm xchg dh, dl
+    _asm xchg ah, al
+}
 #   else
 static SHA_LONG64 __fastcall __pull64be(const void *x)
 {
-    _asm mov edx,[ecx + 0]
-    _asm mov eax,[ecx + 4]
-_asm bswap edx _asm bswap eax}
+    _asm mov edx,[ecx + 0]
+    _asm mov eax,[ecx + 4]
+    _asm bswap edx
+    _asm bswap eax
+}
 #   endif
 #   define PULL64(x) __pull64be(&(x))
 #   if _MSC_VER<=1200
@@ -409,12 +418,15 @@ _asm bswap edx _asm bswap eax}
 # define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
 # define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
 # define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
 # if defined(__i386) || defined(__i386__) || defined(_M_IX86)
 /*
  * This code should give better results on 32-bit CPU with less than
  * ~24 registers, both size and performance wise...
- */ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
-                                        size_t num)
+ */
+
+static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+                                    size_t num)
 {
     const SHA_LONG64 *W = in;
     SHA_LONG64 A, E, T;
@@ -474,6 +486,7 @@ _asm bswap edx _asm bswap eax}
 }
 
 # elif defined(OPENSSL_SMALL_FOOTPRINT)
+
 static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
                                     size_t num)
 {
@@ -544,15 +557,17 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
 }
 
 # else
-#  define ROUND_00_15(i,a,b,c,d,e,f,g,h) do {   \
+#  define ROUND_00_15(i,a,b,c,d,e,f,g,h) do {           \
         T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i];      \
         h = Sigma0(a) + Maj(a,b,c);                     \
-        d += T1; h += T1; } while (0)
-#  define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do {   \
+        d += T1; h += T1; } while (0)
+
+#  define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do {       \
         s0 = X[(j+1)&0x0f]; s0 = sigma0(s0);            \
         s1 = X[(j+14)&0x0f]; s1 = sigma1(s1);           \
         T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f];    \
         ROUND_00_15(i+j,a,b,c,d,e,f,g,h); } while (0)
+
 static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
                                     size_t num)
 {
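
For reference, the macros being re-aligned in this patch implement the standard FIPS 180-4 SHA-512 primitives. Below is a minimal portable sketch of what ROTR, PULL64/__pull64be, ROUND_00_15 and ROUND_16_80 compute. It is illustrative only and not part of the patch: the helper names (rotr64, pull64be, schedule, sha512_round) and the __builtin_bswap64 byte swap behind the __BYTE_ORDER__ guard are assumptions for this sketch, not OpenSSL code.

#include <stdint.h>
#include <string.h>

/* Rotate right by n bits, 0 < n < 64: what the rorq/rotrdi/ror asm behind ROTR does. */
static uint64_t rotr64(uint64_t x, unsigned n)
{
    return (x >> n) | (x << (64 - n));
}

/* Big-endian 64-bit load, the job of PULL64() and __pull64be(). */
static uint64_t pull64be(const unsigned char *p)
{
    uint64_t v;

    memcpy(&v, p, sizeof(v));
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    v = __builtin_bswap64(v);           /* assumes a GCC/Clang-style builtin */
#endif
    return v;
}

/* FIPS 180-4 round functions, mirroring the Sigma/sigma/Ch/Maj macros above. */
static uint64_t Sigma0(uint64_t x) { return rotr64(x, 28) ^ rotr64(x, 34) ^ rotr64(x, 39); }
static uint64_t Sigma1(uint64_t x) { return rotr64(x, 14) ^ rotr64(x, 18) ^ rotr64(x, 41); }
static uint64_t sigma0(uint64_t x) { return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7); }
static uint64_t sigma1(uint64_t x) { return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6); }
static uint64_t Ch(uint64_t x, uint64_t y, uint64_t z)  { return (x & y) ^ (~x & z); }
static uint64_t Maj(uint64_t x, uint64_t y, uint64_t z) { return (x & y) ^ (x & z) ^ (y & z); }

/* Message-schedule extension over a 16-word rolling window, as in ROUND_16_80(). */
static uint64_t schedule(const uint64_t W[16], unsigned t)
{
    return W[t & 0x0f] + sigma0(W[(t + 1) & 0x0f]) +
           W[(t + 9) & 0x0f] + sigma1(W[(t + 14) & 0x0f]);
}

/* One compression round in the shape of ROUND_00_15(): T1/T2 as in FIPS 180-4. */
static void sha512_round(uint64_t Wt, uint64_t Kt,
                         uint64_t a, uint64_t b, uint64_t c, uint64_t *d,
                         uint64_t e, uint64_t f, uint64_t g, uint64_t *h)
{
    uint64_t T1 = *h + Sigma1(e) + Ch(e, f, g) + Kt + Wt;
    uint64_t T2 = Sigma0(a) + Maj(a, b, c);

    *d += T1;                           /* this value serves as the next round's e */
    *h = T1 + T2;                       /* this value serves as the next round's a */
}

Driving 80 such rounds over the rolling 16-word schedule, rotating the working variables a..h between rounds, is the same dataflow that the unrolled loop built from ROUND_00_15/ROUND_16_80 in the file expresses.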