-/* crypto/sha/sha512.c */
-/* ====================================================================
- * Copyright (c) 2004 The OpenSSL Project. All rights reserved
- * according to the OpenSSL license [found in ../../LICENSE].
- * ====================================================================
+/*
+ * Copyright 2004-2018 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
*/
+
#include <openssl/opensslconf.h>
/*-
* IMPLEMENTATION NOTES.
 * As this implementation relies on a 64-bit integer type, it's totally
* inappropriate for platforms which don't support it, most notably
* 16-bit platforms.
- * <appro@fy.chalmers.se>
*/
#include <stdlib.h>
#include <string.h>
#include <openssl/sha.h>
#include <openssl/opensslv.h>
-#include "cryptlib.h"
-
-const char SHA512_version[] = "SHA-512" OPENSSL_VERSION_PTEXT;
+#include "internal/cryptlib.h"
+#include "crypto/sha.h"
#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
    defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || \
    defined(SHA512_ASM)
# define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
#endif
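+
+/*
+ * U64 appends the 64-bit unsigned constant suffix the compiler expects:
+ * MSVC's UI64, UL where __arch64__ marks a 64-bit long, plain ULL otherwise.
+ */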
+#if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
+# define U64(C) C##UI64
+#elif defined(__arch64__)
+# define U64(C) C##UL
+#else
+# define U64(C) C##ULL
+#endif
+
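+/* Initial hash values for SHA-512/224, from FIPS 180-4. */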
+int sha512_224_init(SHA512_CTX *c)
+{
+ c->h[0] = U64(0x8c3d37c819544da2);
+ c->h[1] = U64(0x73e1996689dcd4d6);
+ c->h[2] = U64(0x1dfab7ae32ff9c82);
+ c->h[3] = U64(0x679dd514582f9fcf);
+ c->h[4] = U64(0x0f6d2b697bd44da8);
+ c->h[5] = U64(0x77e36f7304c48942);
+ c->h[6] = U64(0x3f9d85a86a1d36c8);
+ c->h[7] = U64(0x1112e6ad91d692a1);
+
+ c->Nl = 0;
+ c->Nh = 0;
+ c->num = 0;
+ c->md_len = SHA224_DIGEST_LENGTH;
+ return 1;
+}
+
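+/* Initial hash values for SHA-512/256, from FIPS 180-4. */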
+int sha512_256_init(SHA512_CTX *c)
+{
+ c->h[0] = U64(0x22312194fc2bf72c);
+ c->h[1] = U64(0x9f555fa3c84c64c2);
+ c->h[2] = U64(0x2393b86b6f53b151);
+ c->h[3] = U64(0x963877195940eabd);
+ c->h[4] = U64(0x96283ee2a88effe3);
+ c->h[5] = U64(0xbe5e1e2553863992);
+ c->h[6] = U64(0x2b0199fc2c85b8aa);
+ c->h[7] = U64(0x0eb72ddc81c52ca2);
+
+ c->Nl = 0;
+ c->Nh = 0;
+ c->num = 0;
+ c->md_len = SHA256_DIGEST_LENGTH;
+ return 1;
+}
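+
+/*-
+ * A minimal usage sketch, assuming a build that also exposes these two
+ * algorithms through the EVP interface (EVP_sha512_224(), EVP_sha512_256());
+ * data/len stand for the caller's input:
+ *
+ *     unsigned char md[SHA256_DIGEST_LENGTH];
+ *
+ *     if (!EVP_Digest(data, len, md, NULL, EVP_sha512_256(), NULL))
+ *         ...handle the error...
+ */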
+
int SHA384_Init(SHA512_CTX *c)
{
c->h[0] = U64(0xcbbb9d5dc1059ed8);
    p[n] = 0x80;                /* There is always room for one */
n++;
- if (n > (sizeof(c->u) - 16))
- memset(p + n, 0, sizeof(c->u) - n), n = 0,
- sha512_block_data_order(c, p, 1);
+ if (n > (sizeof(c->u) - 16)) {
+ memset(p + n, 0, sizeof(c->u) - n);
+ n = 0;
+ sha512_block_data_order(c, p, 1);
+ }
memset(p + n, 0, sizeof(c->u) - 16 - n);
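+    /* The last 16 bytes of the block carry the 128-bit message bit-count. */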
#ifdef B_ENDIAN
return 0;
switch (c->md_len) {
- /* Let compiler decide if it's appropriate to unroll... */
+ /* Let compiler decide if it's appropriate to unroll... */
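+        /* Digest words are emitted most-significant byte first. */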
+ case SHA224_DIGEST_LENGTH:
+ for (n = 0; n < SHA224_DIGEST_LENGTH / 8; n++) {
+ SHA_LONG64 t = c->h[n];
+
+ *(md++) = (unsigned char)(t >> 56);
+ *(md++) = (unsigned char)(t >> 48);
+ *(md++) = (unsigned char)(t >> 40);
+ *(md++) = (unsigned char)(t >> 32);
+ *(md++) = (unsigned char)(t >> 24);
+ *(md++) = (unsigned char)(t >> 16);
+ *(md++) = (unsigned char)(t >> 8);
+ *(md++) = (unsigned char)(t);
+ }
+ /*
+ * For 224 bits, there are four bytes left over that have to be
+ * processed separately.
+ */
+ {
+ SHA_LONG64 t = c->h[SHA224_DIGEST_LENGTH / 8];
+
+ *(md++) = (unsigned char)(t >> 56);
+ *(md++) = (unsigned char)(t >> 48);
+ *(md++) = (unsigned char)(t >> 40);
+ *(md++) = (unsigned char)(t >> 32);
+ }
+ break;
+ case SHA256_DIGEST_LENGTH:
+ for (n = 0; n < SHA256_DIGEST_LENGTH / 8; n++) {
+ SHA_LONG64 t = c->h[n];
+
+ *(md++) = (unsigned char)(t >> 56);
+ *(md++) = (unsigned char)(t >> 48);
+ *(md++) = (unsigned char)(t >> 40);
+ *(md++) = (unsigned char)(t >> 32);
+ *(md++) = (unsigned char)(t >> 24);
+ *(md++) = (unsigned char)(t >> 16);
+ *(md++) = (unsigned char)(t >> 8);
+ *(md++) = (unsigned char)(t);
+ }
+ break;
case SHA384_DIGEST_LENGTH:
for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) {
SHA_LONG64 t = c->h[n];
*(md++) = (unsigned char)(t);
}
break;
- /* ... as well as make sure md_len is not abused. */
+ /* ... as well as make sure md_len is not abused. */
default:
return 0;
}
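+    /* Unaligned input: stage each block through the aligned ctx buffer. */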
if ((size_t)data % sizeof(c->u.d[0]) != 0)
while (len >= sizeof(c->u))
memcpy(p, data, sizeof(c->u)),
- sha512_block_data_order(c, p, 1),
- len -= sizeof(c->u), data += sizeof(c->u);
+ sha512_block_data_order(c, p, 1),
+ len -= sizeof(c->u), data += sizeof(c->u);
else
#endif
sha512_block_data_order(c, data, len / sizeof(c->u)),
- data += len, len %= sizeof(c->u), data -= len;
+ data += len, len %= sizeof(c->u), data -= len;
}
if (len != 0)
SHA512_Update(&c, d, n);
SHA512_Final(md, &c);
OPENSSL_cleanse(&c, sizeof(c));
- return (md);
+ return md;
}
unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md)
SHA512_Update(&c, d, n);
SHA512_Final(md, &c);
OPENSSL_cleanse(&c, sizeof(c));
- return (md);
+ return md;
}
#ifndef SHA512_ASM
};
# ifndef PEDANTIC
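+/* Prefer a native 64-bit rotate instruction where inline asm is usable. */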
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__GNUC__) && __GNUC__>=2 && \
+ !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
# if defined(__x86_64) || defined(__x86_64__)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
asm ("rorq %1,%0" \
: "=r"(ret) \
: "J"(n),"0"(a) \
# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
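+/*
+ * Both PULL64 variants below fetch a big-endian 64-bit message word on
+ * little-endian x86 by loading and byte-swapping two 32-bit halves.
+ */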
# if defined(I386_ONLY)
# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
- unsigned int hi=p[0],lo=p[1]; \
+ unsigned int hi=p[0],lo=p[1]; \
asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
"roll $16,%%eax; roll $16,%%edx; "\
- "xchgb %%ah,%%al;xchgb %%dh,%%dl;" \
+ "xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
: "=a"(lo),"=d"(hi) \
: "0"(lo),"1"(hi) : "cc"); \
((SHA_LONG64)hi)<<32|lo; })
# else
# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
- unsigned int hi=p[0],lo=p[1]; \
+ unsigned int hi=p[0],lo=p[1]; \
asm ("bswapl %0; bswapl %1;" \
: "=r"(lo),"=r"(hi) \
: "0"(lo),"1"(hi)); \
((SHA_LONG64)hi)<<32|lo; })
# endif
# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
asm ("rotrdi %0,%1,%2" \
: "=r"(ret) \
: "r"(a),"K"(n)); ret; })
# elif defined(__aarch64__)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
asm ("ror %0,%1,%2" \
: "=r"(ret) \
: "r"(a),"I"(n)); ret; })
# if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
-# define PULL64(x) ({ SHA_LONG64 ret; \
+# define PULL64(x) ({ SHA_LONG64 ret; \
asm ("rev %0,%1" \
: "=r"(ret) \
- : "r"(*((const SHA_LONG64 *)(&(x))))); ret; })
+ : "r"(*((const SHA_LONG64 *)(&(x))))); ret; })
# endif
# endif
# elif defined(_MSC_VER)
# pragma intrinsic(_rotr64)
# define ROTR(a,n) _rotr64((a),n)
# endif
-# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(_M_IX86) && !defined(OPENSSL_NO_ASM) && \
+ !defined(OPENSSL_NO_INLINE_ASM)
# if defined(I386_ONLY)
static SHA_LONG64 __fastcall __pull64be(const void *x)
{
- _asm mov edx,[ecx + 0]
- _asm mov eax,[ecx + 4]
-_asm xchg dh, dl
- _asm xchg ah, al
- _asm rol edx, 16 _asm rol eax, 16 _asm xchg dh, dl _asm xchg ah, al}
+ _asm mov edx,[ecx + 0]
+ _asm mov eax,[ecx + 4]
+ _asm xchg dh, dl
+ _asm xchg ah, al
+ _asm rol edx, 16
+ _asm rol eax, 16
+ _asm xchg dh, dl
+ _asm xchg ah, al
+}
# else
static SHA_LONG64 __fastcall __pull64be(const void *x)
{
- _asm mov edx,[ecx + 0]
- _asm mov eax,[ecx + 4]
-_asm bswap edx _asm bswap eax}
+ _asm mov edx,[ecx + 0]
+ _asm mov eax,[ecx + 4]
+ _asm bswap edx
+ _asm bswap eax
+}
# endif
# define PULL64(x) __pull64be(&(x))
-# if _MSC_VER<=1200
-# pragma inline_depth(0)
-# endif
# endif
# endif
# endif
# define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
# define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
# define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
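+/*
+ * Ch(x,y,z) selects y where x has one-bits and z elsewhere; Maj(x,y,z) is
+ * the bitwise majority of its three arguments.
+ */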
+
# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
/*
 * This code should give better results on a 32-bit CPU with fewer than
 * ~24 registers, both size- and performance-wise...
- */ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
- size_t num)
+ */
+
+static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+ size_t num)
{
const SHA_LONG64 *W = in;
SHA_LONG64 A, E, T;
}
# elif defined(OPENSSL_SMALL_FOOTPRINT)
+
static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
size_t num)
{
}
# else
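+/*
+ * One SHA-512 round: on entry T1 holds the current message schedule word;
+ * the rotation of the working variables a..h is expressed by permuting the
+ * macro arguments at each call site.
+ */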
-# define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
+# define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; \
h = Sigma0(a) + Maj(a,b,c); \
- d += T1; h += T1; } while (0)
-# define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \
+ d += T1; h += T1; } while (0)
+
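+/*
+ * Rounds 16..79 first compute the schedule word in place in the 16-entry
+ * circular buffer X: W[j] += sigma0(W[j+1]) + sigma1(W[j+14]) + W[j+9].
+ */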
+# define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \
s0 = X[(j+1)&0x0f]; s0 = sigma0(s0); \
s1 = X[(j+14)&0x0f]; s1 = sigma1(s1); \
T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f]; \
ROUND_00_15(i+j,a,b,c,d,e,f,g,h); } while (0)
+
static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
size_t num)
{