X-Git-Url: https://git.openssl.org/gitweb/?p=openssl.git;a=blobdiff_plain;f=crypto%2Fmd32_common.h;h=0cbcfaf8a20ba279480904765a39fcf47f84e875;hp=307ec30dfc32417eef04a0dd4e5e0050a3d5530e;hb=76ef6ac956490b4d9f78445599ce52510021858a;hpb=1a979201d5b099edbc1e3ad2fb157ee058091a40

diff --git a/crypto/md32_common.h b/crypto/md32_common.h
index 307ec30dfc..0cbcfaf8a2 100644
--- a/crypto/md32_common.h
+++ b/crypto/md32_common.h
@@ -77,7 +77,7 @@
  * ...
  * HASH_LONG Nl,Nh;
  * HASH_LONG data[HASH_LBLOCK];
- * int num;
+ * unsigned int num;
  * ...
  * } HASH_CTX;
  * HASH_UPDATE
@@ -195,7 +195,6 @@
  * Some GNU C inline assembler templates. Note that these are
  * rotates by *constant* number of bits! But that's exactly
  * what we need here...
- *
  *
  */
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
@@ -207,7 +206,7 @@
         : "cc");        \
         ret;            \
         })
-# elif defined(__powerpc) || defined(__ppc)
+# elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
 # define ROTATE(a,n) ({ register unsigned int ret;  \
         asm (           \
         "rlwinm %0,%1,%2,0,31" \
@@ -217,39 +216,6 @@
         })
 # endif
 # endif
-
-/*
- * Engage compiler specific "fetch in reverse byte order"
- * intrinsic function if available.
- */
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-  /* some GNU C inline assembler templates by */
-# if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
-# define BE_FETCH32(a) ({ register unsigned int l=(a);\
-        asm (           \
-        "bswapl %0"     \
-        : "=r"(l) : "0"(l)); \
-        l;              \
-        })
-# elif defined(__powerpc)
-# define LE_FETCH32(a) ({ register unsigned int l;  \
-        asm (           \
-        "lwbrx %0,0,%1" \
-        : "=r"(l)       \
-        : "r"(a));      \
-        l;              \
-        })
-
-# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
-# define LE_FETCH32(a) ({ register unsigned int l;  \
-        asm (           \
-        "lda [%1]#ASI_PRIMARY_LITTLE,%0"\
-        : "=r"(l)       \
-        : "r"(a));      \
-        l;              \
-        })
-# endif
-# endif
 #endif /* PEDANTIC */
 
 #if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)== 4 */
@@ -301,28 +267,12 @@
 # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
 # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER
 # endif
-# elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-# ifndef HOST_FETCH32
-# ifdef LE_FETCH32
-# define HOST_FETCH32(p,l) LE_FETCH32(p)
-# elif defined(REVERSE_FETCH32)
-# define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l)
-# endif
-# endif
 # endif
 #elif defined(L_ENDIAN)
 # if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
 # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER
 # endif
-# elif defined(DATA_ORDER_IS_BIG_ENDIAN)
-# ifndef HOST_FETCH32
-# ifdef BE_FETCH32
-# define HOST_FETCH32(p,l) BE_FETCH32(p)
-# elif defined(REVERSE_FETCH32)
-# define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l)
-# endif
-# endif
 # endif
 #endif
 
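The ROTATE templates touched above exist only to force a single rotate instruction out of older compilers. For reference, the portable fallback that md32_common.h otherwise relies on can be sketched as below; rotl32 is an illustrative name, not an identifier from the header, and the idiom assumes a 32-bit unsigned int and a constant 0 < n < 32:

    /* Rotate-left by a constant: the (a << n) | (a >> (32 - n)) idiom,
     * which most modern compilers collapse into one rotate instruction
     * anyway -- exactly what the asm templates above guarantee. */
    static unsigned int rotl32(unsigned int a, int n)
    {
        return (a << n) | (a >> (32 - n));
    }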
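The hunk below replaces the deleted *_FETCH32 intrinsics with HOST_c2l/HOST_l2c, a cursor-advancing load/store in a fixed byte order. As a reading aid, the big-endian load contract can be expressed in portable C roughly as follows (a sketch only; be_c2l and the test buffer are hypothetical, not part of the patch):

    #include <stdio.h>

    /* Fetch four bytes most-significant first and step the cursor past
     * them -- the effect the bswapl-based macro below obtains from one
     * unaligned 32-bit load plus a byte swap on x86. */
    static unsigned int be_c2l(const unsigned char **c)
    {
        const unsigned char *p = *c;
        unsigned int l = ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16)
                       | ((unsigned int)p[2] <<  8) |  (unsigned int)p[3];
        *c = p + 4;
        return l;
    }

    int main(void)
    {
        const unsigned char buf[4] = { 0x01, 0x02, 0x03, 0x04 };
        const unsigned char *c = buf;
        printf("%08x\n", be_c2l(&c));   /* prints 01020304 on any host */
        return 0;
    }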
@@ -334,11 +284,32 @@
 
 #if defined(DATA_ORDER_IS_BIG_ENDIAN)
 
+#ifndef PEDANTIC
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+  /*
+   * This gives ~30-40% performance improvement in SHA-256 compiled
+   * with gcc [on P4]. Well, first macro to be frank. We can pull
+   * this trick on x86* platforms only, because these CPUs can fetch
+   * unaligned data without raising an exception.
+   */
+# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
+        asm ("bswapl %0":"=r"(r):"0"(r)); \
+        (c)+=4; (l)=r; })
+# define HOST_l2c(l,c) ({ unsigned int r=(l); \
+        asm ("bswapl %0":"=r"(r):"0"(r)); \
+        *((unsigned int *)(c))=r; (c)+=4; r; })
+# endif
+# endif
+#endif
+
+#ifndef HOST_c2l
 #define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++)))<<24), \
         l|=(((unsigned long)(*((c)++)))<<16), \
         l|=(((unsigned long)(*((c)++)))<< 8), \
         l|=(((unsigned long)(*((c)++)))    ), \
         l)
+#endif
 #define HOST_p_c2l(c,l,n) { \
         switch (n) { \
         case 0: l =((unsigned long)(*((c)++)))<<24; \
@@ -362,19 +333,29 @@
         case 2: l|=((unsigned long)(*(--(c))))<<16; \
         case 1: l|=((unsigned long)(*(--(c))))<<24; \
         } }
+#ifndef HOST_l2c
 #define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \
         *((c)++)=(unsigned char)(((l)>>16)&0xff), \
         *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
         *((c)++)=(unsigned char)(((l)    )&0xff), \
         l)
+#endif
 
 #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 
+#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+  /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
+# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
+# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
+#endif
+
+#ifndef HOST_c2l
 #define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++)))    ), \
         l|=(((unsigned long)(*((c)++)))<< 8), \
         l|=(((unsigned long)(*((c)++)))<<16), \
         l|=(((unsigned long)(*((c)++)))<<24), \
         l)
+#endif
 #define HOST_p_c2l(c,l,n) { \
         switch (n) { \
         case 0: l =((unsigned long)(*((c)++))); \
@@ -398,11 +379,13 @@
         case 2: l|=((unsigned long)(*(--(c))))<< 8; \
         case 1: l|=((unsigned long)(*(--(c)))); \
         } }
+#ifndef HOST_l2c
 #define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l)    )&0xff), \
         *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
         *((c)++)=(unsigned char)(((l)>>16)&0xff), \
         *((c)++)=(unsigned char)(((l)>>24)&0xff), \
         l)
+#endif
 
 #endif
 
@@ -410,21 +393,21 @@
  * Time for some action:-)
  */
 
-int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
+int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
         {
         const unsigned char *data=data_;
         register HASH_LONG * p;
-        register unsigned long l;
-        int sw,sc,ew,ec;
+        register HASH_LONG l;
+        size_t sw,sc,ew,ec;
 
         if (len==0) return 1;
 
-        l=(c->Nl+(len<<3))&0xffffffffL;
+        l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
         /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
          * Wei Dai for pointing it out. */
         if (l < c->Nl) /* overflow */
                 c->Nh++;
-        c->Nh+=(len>>29);
+        c->Nh+=(len>>29); /* might cause compiler warning on 16-bit */
         c->Nl=l;
 
         if (c->num != 0)
@@ -447,7 +430,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
                 }
         else
                 {
-                c->num+=len;
+                c->num+=(unsigned int)len;
                 if ((sc+len) < 4) /* ugly, add char's to a word */
                         {
                         l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
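Worth pausing on the arithmetic in the HASH_UPDATE hunk above: the running message length in bits is kept as a 64-bit quantity split across the two 32-bit words Nl and Nh, so the low word must be checked for wrap-around and the bits of len that fall beyond it folded into Nh. A standalone sketch of that carry logic under those assumptions (bit_counter and add_len are illustrative names, not code from the patch):

    #include <stddef.h>

    struct bit_counter { unsigned int Nl, Nh; };  /* low/high 32 bits */

    static void add_len(struct bit_counter *c, size_t len)
    {
        /* len<<3 converts bytes to bits, truncated to the low 32 bits */
        unsigned int l = (c->Nl + ((unsigned int)len << 3)) & 0xffffffffU;
        if (l < c->Nl)
            c->Nh++;                         /* carry out of the low word */
        c->Nh += (unsigned int)(len >> 29);  /* top bits of len*8 */
        c->Nl = l;
    }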
@@ -481,7 +464,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
                  * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
                  * only if sizeof(HASH_LONG)==4.
                  */
-                if ((((unsigned long)data)%4) == 0)
+                if ((((size_t)data)%4) == 0)
                         {
                         /* data is properly aligned so that we can cast it: */
                         HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
@@ -530,7 +513,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
 void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
         {
 #if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
-        if ((((unsigned long)data)%4) == 0)
+        if ((((size_t)data)%4) == 0)
                 /* data is properly aligned so that we can cast it: */
                 HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
         else
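A final note on the two (size_t)data casts this patch makes: on LLP64 platforms such as 64-bit Windows, unsigned long is only 32 bits while pointers are 64, so size_t is the safer type for this alignment test (strictly speaking, C99's uintptr_t is the dedicated type, but it postdates this code). A minimal sketch of the test, with is_word_aligned as a hypothetical helper name:

    #include <stddef.h>

    /* Non-zero when p lies on a 4-byte boundary -- the condition under
     * which the code above may cast the buffer to HASH_LONG * and call
     * HASH_BLOCK_DATA_ORDER_ALIGNED directly. */
    static int is_word_aligned(const void *p)
    {
        return ((size_t)p % 4) == 0;
    }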