X-Git-Url: https://git.openssl.org/?p=openssl.git;a=blobdiff_plain;f=crypto%2Fsha%2Fsha256.c;h=dbd5eb759e6dc128942cf235d56430da433b30ae;hp=b5a9da10045d4dab96d0c25c9d34705612919b4a;hb=619b94667cc7a097f6d1e2123c4f4c2c85afb8f7;hpb=c842261b1bf0570604585421f4a49027e60d1da9

diff --git a/crypto/sha/sha256.c b/crypto/sha/sha256.c
index b5a9da1004..dbd5eb759e 100644
--- a/crypto/sha/sha256.c
+++ b/crypto/sha/sha256.c
@@ -1,52 +1,53 @@
 /* crypto/sha/sha256.c */
 /* ====================================================================
- * Copyright (c) 2004 The OpenSSL Project. All rights reserved.
+ * Copyright (c) 2004 The OpenSSL Project. All rights reserved
+ * according to the OpenSSL license [found in ../../LICENSE].
  * ====================================================================
  */
 
+#include <openssl/crypto.h>
+#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256)
+
 #include <stdlib.h>
 #include <string.h>
 
-#include <openssl/crypto.h>
 #include <openssl/sha.h>
 #include <openssl/opensslv.h>
 
-const char *SHA256_version="SHA-256" OPENSSL_VERSION_PTEXT;
+__fips_constseg
+const char SHA256_version[]="SHA-256" OPENSSL_VERSION_PTEXT;
 
 int SHA224_Init (SHA256_CTX *c)
 	{
+	memset (c,0,sizeof(*c));
 	c->h[0]=0xc1059ed8UL;	c->h[1]=0x367cd507UL;
 	c->h[2]=0x3070dd17UL;	c->h[3]=0xf70e5939UL;
 	c->h[4]=0xffc00b31UL;	c->h[5]=0x68581511UL;
 	c->h[6]=0x64f98fa7UL;	c->h[7]=0xbefa4fa4UL;
-	c->Nl=0;	c->Nh=0;
-	c->num=0;
-	return 1;
+	c->md_len=SHA224_DIGEST_LENGTH;
+	return 1;
 	}
 
 int SHA256_Init (SHA256_CTX *c)
 	{
+	memset (c,0,sizeof(*c));
 	c->h[0]=0x6a09e667UL;	c->h[1]=0xbb67ae85UL;
 	c->h[2]=0x3c6ef372UL;	c->h[3]=0xa54ff53aUL;
 	c->h[4]=0x510e527fUL;	c->h[5]=0x9b05688cUL;
 	c->h[6]=0x1f83d9abUL;	c->h[7]=0x5be0cd19UL;
-	c->Nl=0;	c->Nh=0;
-	c->num=0;
-	return 1;
+	c->md_len=SHA256_DIGEST_LENGTH;
+	return 1;
 	}
 
 unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md)
 	{
 	SHA256_CTX c;
-	static unsigned char m[SHA256_DIGEST_LENGTH];
+	static unsigned char m[SHA224_DIGEST_LENGTH];
 
+	if (md == NULL) md=m;
 	SHA224_Init(&c);
 	SHA256_Update(&c,d,n);
-	SHA256_Final(m,&c);
-	if (md != NULL)	memcpy (md,m,SHA224_DIGEST_LENGTH),
-			memset (m,0,sizeof(m));
-	else		md=m,
-			memset (m+SHA224_DIGEST_LENGTH,0,sizeof(m)-SHA256_DIGEST_LENGTH);
+	SHA256_Final(md,&c);
 	OPENSSL_cleanse(&c,sizeof(c));
 	return(md);
 	}
@@ -64,35 +65,58 @@ unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md)
 	return(md);
 	}
 
-#ifndef SHA_LONG_LOG2
-#define SHA_LONG_LOG2	2	/* default to 32 bits */
-#endif
+int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
+{ return SHA256_Update (c,data,len); }
+int SHA224_Final (unsigned char *md, SHA256_CTX *c)
+{ return SHA256_Final (md,c); }
 
 #define	DATA_ORDER_IS_BIG_ENDIAN
 
 #define	HASH_LONG		SHA_LONG
-#define	HASH_LONG_LOG2		SHA_LONG_LOG2
 #define	HASH_CTX		SHA256_CTX
 #define	HASH_CBLOCK		SHA_CBLOCK
-#define	HASH_LBLOCK		SHA_LBLOCK
+/*
+ * Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
+ * default: case below covers for it. It's not clear however if it's
+ * permitted to truncate to amount of bytes not divisible by 4. I bet not,
+ * but if it is, then default: case shall be extended. For reference.
+ * Idea behind separate cases for pre-defined lenghts is to let the
+ * compiler decide if it's appropriate to unroll small loops.
+ */
 #define	HASH_MAKE_STRING(c,s)	do {	\
 	unsigned long ll;		\
-	ll=(c)->h[0]; HOST_l2c(ll,(s));	ll=(c)->h[1]; HOST_l2c(ll,(s));	\
-	ll=(c)->h[2]; HOST_l2c(ll,(s));	ll=(c)->h[3]; HOST_l2c(ll,(s));	\
-	ll=(c)->h[4]; HOST_l2c(ll,(s));	ll=(c)->h[5]; HOST_l2c(ll,(s));	\
-	ll=(c)->h[6]; HOST_l2c(ll,(s));	ll=(c)->h[7]; HOST_l2c(ll,(s));	\
+	unsigned int  nn;		\
+	switch ((c)->md_len)		\
+	{   case SHA224_DIGEST_LENGTH:	\
+		for (nn=0;nn<SHA224_DIGEST_LENGTH/4;nn++)	\
+		{   ll=(c)->h[nn]; (void)HOST_l2c(ll,(s));   }	\
+		break;			\
+	    case SHA256_DIGEST_LENGTH:	\
+		for (nn=0;nn<SHA256_DIGEST_LENGTH/4;nn++)	\
+		{   ll=(c)->h[nn]; (void)HOST_l2c(ll,(s));   }	\
+		break;			\
+	    default:			\
+		if ((c)->md_len > SHA256_DIGEST_LENGTH)	\
+		    return 0;				\
+		for (nn=0;nn<(c)->md_len/4;nn++)	\
+		{   ll=(c)->h[nn]; (void)HOST_l2c(ll,(s));   }	\
+		break;			\
+	}				\
 	} while (0)
 
 #define	HASH_UPDATE		SHA256_Update
 #define	HASH_TRANSFORM		SHA256_Transform
 #define	HASH_FINAL		SHA256_Final
-#define	HASH_BLOCK_HOST_ORDER	sha256_block_host_order
 #define	HASH_BLOCK_DATA_ORDER	sha256_block_data_order
-void sha256_block_host_order (SHA256_CTX *ctx, const void *in, size_t num);
+#ifndef SHA256_ASM
+static
+#endif
 void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num);
 
 #include "md32_common.h"
 
+#ifndef SHA256_ASM
+__fips_constseg
 static const SHA_LONG K256[64] = {
 	0x428a2f98UL,0x71374491UL,0xb5c0fbcfUL,0xe9b5dba5UL,
 	0x3956c25bUL,0x59f111f1UL,0x923f82a4UL,0xab1c5ed5UL,
@@ -126,43 +150,25 @@
 
 #ifdef OPENSSL_SMALL_FOOTPRINT
 
-static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
+static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
 	{
 	unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1,T2;
-	SHA_LONG	X[16];
+	SHA_LONG	X[16],l;
 	int i;
+	const unsigned char *data=in;
 
 			while (num--) {
 
 	a = ctx->h[0];	b = ctx->h[1];	c = ctx->h[2];	d = ctx->h[3];
 	e = ctx->h[4];	f = ctx->h[5];	g = ctx->h[6];	h = ctx->h[7];
 
-	if (host)
-		{
-		const SHA_LONG *W=in;
-
-		for (i=0;i<16;i++)
-			{
-			T1 = X[i] = W[i];
-			T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
-			T2 = Sigma0(a) + Maj(a,b,c);
-			h = g;	g = f;	f = e;	e = d + T1;
-			d = c;	c = b;	b = a;	a = T1 + T2;
-			}
-		}
-	else
+	for (i=0;i<16;i++)
 		{
-		const unsigned char *data=in;
-		SHA_LONG l;
-
-		for (i=0;i<16;i++)
-			{
-			HOST_c2l(data,l); T1 = X[i] = l;
-			T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
-			T2 = Sigma0(a) + Maj(a,b,c);
-			h = g;	g = f;	f = e;	e = d + T1;
-			d = c;	c = b;	b = a;	a = T1 + T2;
-			}
+		HOST_c2l(data,l); T1 = X[i] = l;
+		T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
+		T2 = Sigma0(a) + Maj(a,b,c);
+		h = g;	g = f;	f = e;	e = d + T1;
+		d = c;	c = b;	b = a;	a = T1 + T2;
 		}
 
 	for (;i<64;i++)
@@ -193,23 +199,25 @@ static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
 #define	ROUND_16_63(i,a,b,c,d,e,f,g,h,X)	do {	\
 	s0 = X[(i+1)&0x0f];	s0 = sigma0(s0);	\
 	s1 = X[(i+14)&0x0f];	s1 = sigma1(s1);	\
-	T1 = X[i&0x0f] += s0 + s1 + X[(i+9)&0x0f];	\
+	T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f];	\
 	ROUND_00_15(i,a,b,c,d,e,f,g,h);		} while (0)
 
-static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
+static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
 	{
 	unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1;
 	SHA_LONG	X[16];
 	int i;
+	const unsigned char *data=in;
+	const union { long one; char little; } is_endian = {1};
 
 			while (num--) {
 
 	a = ctx->h[0];	b = ctx->h[1];	c = ctx->h[2];	d = ctx->h[3];
 	e = ctx->h[4];	f = ctx->h[5];	g = ctx->h[6];	h = ctx->h[7];
 
-	if (host)
+	if (!is_endian.little && sizeof(SHA_LONG)==4 && ((size_t)in%4)==0)
 		{
-		const SHA_LONG *W=in;
+		const SHA_LONG *W=(const SHA_LONG *)data;
 
 		T1 = X[0] = W[0];	ROUND_00_15(0,a,b,c,d,e,f,g,h);
 		T1 = X[1] = W[1];	ROUND_00_15(1,h,a,b,c,d,e,f,g);
@@ -227,10 +235,11 @@ static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
 		T1 = X[13] = W[13];	ROUND_00_15(13,d,e,f,g,h,a,b,c);
 		T1 = X[14] = W[14];	ROUND_00_15(14,c,d,e,f,g,h,a,b);
 		T1 = X[15] = W[15];	ROUND_00_15(15,b,c,d,e,f,g,h,a);
+
+		data += SHA256_CBLOCK;
 		}
 	else
 		{
-		const unsigned char *data=in;
 		SHA_LONG l;
 
 		HOST_c2l(data,l); T1 = X[0] = l;  ROUND_00_15(0,a,b,c,d,e,f,g,h);
@@ -270,14 +279,6 @@ static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
 	}
 
 #endif
+#endif /* SHA256_ASM */
 
-/*
- * Idea is to trade couple of cycles for some space. On IA-32 we save
- * about 4K in "big footprint" case. In "small footprint" case any gain
- * is appreciated:-)
- */
-void HASH_BLOCK_HOST_ORDER (SHA256_CTX *ctx, const void *in, size_t num)
-{ sha256_block (ctx,in,num,1); }
-
-void HASH_BLOCK_DATA_ORDER (SHA256_CTX *ctx, const void *in, size_t num)
-{ sha256_block (ctx,in,num,0); }
+#endif /* OPENSSL_NO_SHA256 */
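
For reference, a minimal caller-side sketch of the API touched by this patch (not part of the diff itself). It uses only the SHA224(), SHA256_Init(), SHA256_Update() and SHA256_Final() entry points defined in crypto/sha/sha256.c and declared in <openssl/sha.h>, and assumes linking against libcrypto (-lcrypto):

/* Usage sketch only -- illustrates the streaming and one-shot interfaces
 * implemented by crypto/sha/sha256.c; not part of the patch above. */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

int main(void)
{
	static const unsigned char msg[] = "abc";
	unsigned char md256[SHA256_DIGEST_LENGTH];	/* 32 bytes */
	unsigned char md224[SHA224_DIGEST_LENGTH];	/* 28 bytes */
	SHA256_CTX c;
	size_t i;

	/* Streaming interface: Init loads the initial hash values and sets
	 * c->md_len, Update runs the compression function over the data,
	 * Final serializes c->h[] according to md_len (HASH_MAKE_STRING). */
	SHA256_Init(&c);
	SHA256_Update(&c, msg, strlen((const char *)msg));
	SHA256_Final(md256, &c);

	/* One-shot SHA-224: same compression function, different initial
	 * hash values and a 28-byte digest. */
	SHA224(msg, strlen((const char *)msg), md224);

	printf("SHA-256(\"abc\") = ");
	for (i = 0; i < sizeof(md256); i++)
		printf("%02x", md256[i]);
	printf("\nSHA-224(\"abc\") = ");
	for (i = 0; i < sizeof(md224); i++)
		printf("%02x", md224[i]);
	printf("\n");
	return 0;
}

As the patch shows, SHA-224 reuses the SHA-256 machinery end to end; only the initial hash values and the digest length recorded in md_len differ.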