X-Git-Url: https://git.openssl.org/?p=openssl.git;a=blobdiff_plain;f=crypto%2Fmd32_common.h;h=0cbcfaf8a20ba279480904765a39fcf47f84e875;hp=d6370521be1b3d6caa95573f90715a59d9da31dc;hb=5f1841cdcae459924c3d1d92fcaf3110068c7cda;hpb=531b2cf7e92d3e6d4168e77752af87fb027024f5 diff --git a/crypto/md32_common.h b/crypto/md32_common.h index d6370521be..0cbcfaf8a2 100644 --- a/crypto/md32_common.h +++ b/crypto/md32_common.h @@ -1,6 +1,6 @@ /* crypto/md32_common.h */ /* ==================================================================== - * Copyright (c) 1999 The OpenSSL Project. All rights reserved. + * Copyright (c) 1999-2002 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -77,7 +77,7 @@ * ... * HASH_LONG Nl,Nh; * HASH_LONG data[HASH_LBLOCK]; - * int num; + * unsigned int num; * ... * } HASH_CTX; * HASH_UPDATE @@ -94,6 +94,8 @@ * in original (data) byte order, implemented externally (it * actually is optional if data and host are of the same * "endianess"). + * HASH_MAKE_STRING + * macro convering context variables to an ASCII hash string. * * Optional macros: * @@ -177,28 +179,36 @@ */ #undef ROTATE #ifndef PEDANTIC -# if defined(_MSC_VER) -# define ROTATE(a,n) _lrotl(a,n) -# elif defined(__GNUC__) && __GNUC__>=2 +# if defined(_MSC_VER) || defined(__ICC) +# define ROTATE(a,n) _lrotl(a,n) +# elif defined(__MWERKS__) +# if defined(__POWERPC__) +# define ROTATE(a,n) __rlwinm(a,n,0,31) +# elif defined(__MC68K__) + /* Motorola specific tweak. */ +# define ROTATE(a,n) ( n<24 ? __rol(a,n) : __ror(a,32-n) ) +# else +# define ROTATE(a,n) __rol(a,n) +# endif +# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) /* * Some GNU C inline assembler templates. Note that these are * rotates by *constant* number of bits! But that's exactly * what we need here... - * * */ -# if defined(__i386) +# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) # define ROTATE(a,n) ({ register unsigned int ret; \ - asm volatile ( \ + asm ( \ "roll %1,%0" \ : "=r"(ret) \ : "I"(n), "0"(a) \ : "cc"); \ ret; \ }) -# elif defined(__powerpc) +# elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__) # define ROTATE(a,n) ({ register unsigned int ret; \ - asm volatile ( \ + asm ( \ "rlwinm %0,%1,%2,0,31" \ : "=r"(ret) \ : "r"(a), "I"(n)); \ @@ -206,39 +216,6 @@ }) # endif # endif - -/* - * Engage compiler specific "fetch in reverse byte order" - * intrinsic function if available. 
- */ -# if defined(__GNUC__) && __GNUC__>=2 - /* some GNU C inline assembler templates by */ -# if defined(__i386) && !defined(I386_ONLY) -# define BE_FETCH32(a) ({ register unsigned int l=(a);\ - asm volatile ( \ - "bswapl %0" \ - : "=r"(l) : "0"(l)); \ - l; \ - }) -# elif defined(__powerpc) -# define LE_FETCH32(a) ({ register unsigned int l; \ - asm volatile ( \ - "lwbrx %0,0,%1" \ - : "=r"(l) \ - : "r"(a)); \ - l; \ - }) - -# elif defined(__sparc) && defined(ULTRASPARC) -# define LE_FETCH32(a) ({ register unsigned int l; \ - asm volatile ( \ - "lda [%1]#ASI_PRIMARY_LITTLE,%0"\ - : "=r"(l) \ - : "r"(a)); \ - l; \ - }) -# endif -# endif #endif /* PEDANTIC */ #if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)== 4 */ @@ -290,32 +267,16 @@ # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2 # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER # endif -# elif defined(DATA_ORDER_IS_LITTLE_ENDIAN) -# ifndef HOST_FETCH32 -# ifdef LE_FETCH32 -# define HOST_FETCH32(p,l) LE_FETCH32(p) -# elif defined(REVERSE_FETCH32) -# define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l) -# endif -# endif # endif #elif defined(L_ENDIAN) # if defined(DATA_ORDER_IS_LITTLE_ENDIAN) # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2 # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER # endif -# elif defined(DATA_ORDER_IS_BIG_ENDIAN) -# ifndef HOST_FETCH32 -# ifdef BE_FETCH32 -# define HOST_FETCH32(p,l) BE_FETCH32(p) -# elif defined(REVERSE_FETCH32) -# define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l) -# endif -# endif # endif #endif -#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_BLOCK_DATA_ORDER_ALIGNED!=1 +#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) #ifndef HASH_BLOCK_DATA_ORDER #error "HASH_BLOCK_DATA_ORDER must be defined!" #endif @@ -323,11 +284,32 @@ #if defined(DATA_ORDER_IS_BIG_ENDIAN) +#ifndef PEDANTIC +# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) +# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) + /* + * This gives ~30-40% performance improvement in SHA-256 compiled + * with gcc [on P4]. Well, first macro to be frank. We can pull + * this trick on x86* platforms only, because these CPUs can fetch + * unaligned data without raising an exception. + */ +# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \ + asm ("bswapl %0":"=r"(r):"0"(r)); \ + (c)+=4; (l)=r; }) +# define HOST_l2c(l,c) ({ unsigned int r=(l); \ + asm ("bswapl %0":"=r"(r):"0"(r)); \ + *((unsigned int *)(c))=r; (c)+=4; r; }) +# endif +# endif +#endif + +#ifndef HOST_c2l #define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++)))<<24), \ l|=(((unsigned long)(*((c)++)))<<16), \ l|=(((unsigned long)(*((c)++)))<< 8), \ l|=(((unsigned long)(*((c)++))) ), \ l) +#endif #define HOST_p_c2l(c,l,n) { \ switch (n) { \ case 0: l =((unsigned long)(*((c)++)))<<24; \ @@ -351,19 +333,29 @@ case 2: l|=((unsigned long)(*(--(c))))<<16; \ case 1: l|=((unsigned long)(*(--(c))))<<24; \ } } +#ifndef HOST_l2c #define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \ *((c)++)=(unsigned char)(((l)>>16)&0xff), \ *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ *((c)++)=(unsigned char)(((l) )&0xff), \ l) +#endif #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN) +#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) + /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. 
*/ +# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l) +# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l) +#endif + +#ifndef HOST_c2l #define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++))) ), \ l|=(((unsigned long)(*((c)++)))<< 8), \ l|=(((unsigned long)(*((c)++)))<<16), \ l|=(((unsigned long)(*((c)++)))<<24), \ l) +#endif #define HOST_p_c2l(c,l,n) { \ switch (n) { \ case 0: l =((unsigned long)(*((c)++))); \ @@ -387,11 +379,13 @@ case 2: l|=((unsigned long)(*(--(c))))<< 8; \ case 1: l|=((unsigned long)(*(--(c)))); \ } } +#ifndef HOST_l2c #define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \ *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ *((c)++)=(unsigned char)(((l)>>16)&0xff), \ *((c)++)=(unsigned char)(((l)>>24)&0xff), \ l) +#endif #endif @@ -399,20 +393,21 @@ * Time for some action:-) */ -void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len) +int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len) { + const unsigned char *data=data_; register HASH_LONG * p; - register unsigned long l; - int sw,sc,ew,ec; + register HASH_LONG l; + size_t sw,sc,ew,ec; - if (len==0) return; + if (len==0) return 1; - l=(c->Nl+(len<<3))&0xffffffffL; + l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL; /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to * Wei Dai for pointing it out. */ if (l < c->Nl) /* overflow */ c->Nh++; - c->Nh+=(len>>29); + c->Nh+=(len>>29); /* might cause compiler warning on 16-bit */ c->Nl=l; if (c->num != 0) @@ -435,7 +430,7 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len) } else { - c->num+=len; + c->num+=(unsigned int)len; if ((sc+len) < 4) /* ugly, add char's to a word */ { l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l; @@ -444,7 +439,10 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len) { ew=(c->num>>2); ec=(c->num&0x03); - l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l; + if (sc) + l=p[sw]; + HOST_p_c2l(data,l,sc); + p[sw++]=l; for (; sw < ew; sw++) { HOST_c2l(data,l); p[sw]=l; @@ -454,21 +452,22 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len) HOST_c2l_p(data,l,ec); p[sw]=l; } } - return; + return 1; } } sw=len/HASH_CBLOCK; if (sw > 0) { -#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_BLOCK_DATA_ORDER_ALIGNED!=1 +#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) /* * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined * only if sizeof(HASH_LONG)==4. 
*/ - if ((((unsigned long)data)%4) == 0) + if ((((size_t)data)%4) == 0) { - HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,sw); + /* data is properly aligned so that we can cast it: */ + HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw); sw*=HASH_CBLOCK; data+=sw; len-=sw; @@ -507,14 +506,16 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len) HOST_c2l_p(data,l,ec); *p=l; } + return 1; } -void HASH_TRANSFORM (HASH_CTX *c, unsigned char *data) +void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data) { -#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_BLOCK_DATA_ORDER_ALIGNED!=1 - if ((((unsigned long)data)%4) == 0) - HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,1); +#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) + if ((((size_t)data)%4) == 0) + /* data is properly aligned so that we can cast it: */ + HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1); else #if !defined(HASH_BLOCK_DATA_ORDER) { @@ -524,12 +525,12 @@ void HASH_TRANSFORM (HASH_CTX *c, unsigned char *data) #endif #endif #if defined(HASH_BLOCK_DATA_ORDER) - HASH_BLOCK_DATA_ORDER (c,(const unsigned char *)data,1); + HASH_BLOCK_DATA_ORDER (c,data,1); #endif } -void HASH_FINAL (unsigned char *md, HASH_CTX *c) +int HASH_FINAL (unsigned char *md, HASH_CTX *c) { register HASH_LONG *p; register unsigned long l; @@ -579,14 +580,41 @@ void HASH_FINAL (unsigned char *md, HASH_CTX *c) #endif HASH_BLOCK_HOST_ORDER (c,p,1); - l=c->A; HOST_l2c(l,md); - l=c->B; HOST_l2c(l,md); - l=c->C; HOST_l2c(l,md); - l=c->D; HOST_l2c(l,md); +#ifndef HASH_MAKE_STRING +#error "HASH_MAKE_STRING must be defined!" +#else + HASH_MAKE_STRING(c,md); +#endif c->num=0; /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack * but I'm not worried :-) - memset((void *)c,0,sizeof(HASH_CTX)); + OPENSSL_cleanse((void *)c,sizeof(HASH_CTX)); */ + return 1; } + +#ifndef MD32_REG_T +#define MD32_REG_T long +/* + * This comment was originaly written for MD5, which is why it + * discusses A-D. But it basically applies to all 32-bit digests, + * which is why it was moved to common header file. + * + * In case you wonder why A-D are declared as long and not + * as MD5_LONG. Doing so results in slight performance + * boost on LP64 architectures. The catch is we don't + * really care if 32 MSBs of a 64-bit register get polluted + * with eventual overflows as we *save* only 32 LSBs in + * *either* case. Now declaring 'em long excuses the compiler + * from keeping 32 MSBs zeroed resulting in 13% performance + * improvement under SPARC Solaris7/64 and 5% under AlphaLinux. + * Well, to be honest it should say that this *prevents* + * performance degradation. + * + * Apparently there're LP64 compilers that generate better + * code if A-D are declared int. Most notably GCC-x86_64 + * generates better code. + * + */ +#endif
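
The MD32_REG_T note that closes the patch is easier to see in context. Below is a minimal, hypothetical sketch (toy_ctx, toy_block and the "mixing" loop are invented for illustration, not a real compression round) of how a block routine built on this header might declare its working variables with MD32_REG_T, so that an LP64 compiler is free to leave the upper 32 bits unspecified:

#include <stdio.h>
#include <stddef.h>

#ifndef MD32_REG_T
# define MD32_REG_T long  /* header default; plain int can win on gcc/x86_64 */
#endif

/* toy_ctx/toy_block are invented names, used only for this sketch. */
typedef struct { unsigned int A, B, C, D; } toy_ctx;

static void toy_block(toy_ctx *c, const unsigned char *p, size_t num)
	{
	/* Working variables may be wider than 32 bits; only the low
	 * 32 bits are stored back, so upper-bit "pollution" is harmless. */
	register MD32_REG_T A = c->A, B = c->B, C = c->C, D = c->D;

	while (num--)
		{
		A += *p++;              /* placeholder mixing, not a real round */
		B ^= A; C += B; D ^= C;
		}
	c->A = (unsigned int)A; c->B = (unsigned int)B;
	c->C = (unsigned int)C; c->D = (unsigned int)D;
	}

int main(void)
	{
	toy_ctx c = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
	static const unsigned char block[4] = { 't','e','s','t' };

	toy_block(&c, block, sizeof(block));
	printf("%08x %08x %08x %08x\n", c.A, c.B, c.C, c.D);
	return 0;
	}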
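Stepping back to the earlier hunks, the reworked inline-asm ROTATE can be exercised next to a plain C rotate. The program below is only an illustration: ROTATE_C is an assumed portable fallback written for this example (md32_common.h defines its own fallback outside the hunks shown), while ROTATE_ASM copies the GCC/x86 template from the patch. Note the "I" constraint: the rotate count must be a compile-time constant, which is exactly how the digest round macros use it.

#include <stdio.h>

#define ROTATE_C(a,n) (((((unsigned long)(a))<<(n))|((a)>>(32-(n))))&0xffffffffUL)

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
# define ROTATE_ASM(a,n) ({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#endif

int main(void)
	{
	unsigned int x = 0xdeadbeefU;

	printf("portable rotate  : %08lx\n", ROTATE_C(x, 7));
#ifdef ROTATE_ASM
	/* The "I" constraint requires a constant rotate count. */
	printf("inline-asm rotate: %08x\n", ROTATE_ASM(x, 7));
#endif
	return 0;
	}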
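The big-endian HOST_c2l change follows the same pattern: the portable byte-by-byte macro stays as the fallback, and on x86/x86_64 under GCC a bswapl-based version performs one (possibly unaligned) 32-bit load, which is where the quoted ~30-40% SHA-256 gain on P4 comes from. A small illustrative harness (the *_PORTABLE/*_BSWAP names are mine, not the header's) showing both forms yield the same word:

#include <stdio.h>

#define HOST_c2l_PORTABLE(c,l)	(l =(((unsigned long)(*((c)++)))<<24),	\
				 l|=(((unsigned long)(*((c)++)))<<16),	\
				 l|=(((unsigned long)(*((c)++)))<< 8),	\
				 l|=(((unsigned long)(*((c)++)))     ),	\
				 l)

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
/* One 32-bit load plus bswapl; relies on x86 tolerating unaligned,
 * type-punned loads, as the patch comment explains. */
# define HOST_c2l_BSWAP(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
				   asm ("bswapl %0":"=r"(r):"0"(r));		\
				   (c)+=4; (l)=r;				})
#endif

int main(void)
	{
	static const unsigned char buf[8] = { 0x01,0x02,0x03,0x04,0xaa,0xbb,0xcc,0xdd };
	const unsigned char *p;
	unsigned long l;

	p = buf; HOST_c2l_PORTABLE(p,l);
	printf("portable: %08lx\n", l);	/* expect 01020304 */

#ifdef HOST_c2l_BSWAP
	p = buf; HOST_c2l_BSWAP(p,l);
	printf("bswapl  : %08lx\n", l);	/* same word, single load */
#endif
	return 0;
	}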
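Finally, the template-level signature changes (HASH_UPDATE now takes const void */size_t, and HASH_UPDATE/HASH_FINAL return int, 1 on success) surface in every digest API generated from this header. The sketch below shows the caller-side effect using the public MD5 wrappers as one example; it assumes the OpenSSL headers are installed and the program is linked with -lcrypto.

#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>

int main(void)
	{
	MD5_CTX ctx;
	unsigned char md[MD5_DIGEST_LENGTH];
	const char *msg = "abc";
	int i;

	/* Update takes (const void *, size_t); all three return 1 on success. */
	if (!MD5_Init(&ctx)
	    || !MD5_Update(&ctx, msg, strlen(msg))
	    || !MD5_Final(md, &ctx))
		{
		fprintf(stderr, "digest failed\n");
		return 1;
		}
	for (i = 0; i < MD5_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");
	return 0;
	}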