X-Git-Url: https://git.openssl.org/gitweb/?a=blobdiff_plain;f=crypto%2Fmd32_common.h;h=353d2b96add8be240fc99a65ee01f3be64debbb8;hb=af1cb47e65499c23f2dc152d6b26a55e11d5af2b;hp=e739da21217abff9f1bbec478790a8a0e6e4f9b2;hpb=a4af39ac4482355ffdd61fb61231a0c79b96997b;p=openssl.git

diff --git a/crypto/md32_common.h b/crypto/md32_common.h
index e739da2121..353d2b96ad 100644
--- a/crypto/md32_common.h
+++ b/crypto/md32_common.h
@@ -179,11 +179,18 @@
  */
 #undef ROTATE
 #ifndef PEDANTIC
-# if defined(_MSC_VER)
+# if 0 /* defined(_MSC_VER) */
 #  define ROTATE(a,n)	_lrotl(a,n)
 # elif defined(__MWERKS__)
-#  define ROTATE(a,n)	__rol(a,n)
-# elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM)
+#  if defined(__POWERPC__)
+#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
+#  elif defined(__MC68K__)
+    /* Motorola specific tweak. */
+#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
+#  else
+#   define ROTATE(a,n)	__rol(a,n)
+#  endif
+# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
   /*
    * Some GNU C inline assembler templates. Note that these are
    * rotates by *constant* number of bits! But that's exactly
@@ -191,18 +198,18 @@
    *
    *
    */
-#  if defined(__i386) && !defined(__sun)
+#  if defined(__i386) || defined(__i386__)
 #   define ROTATE(a,n)	({ register unsigned int ret;	\
-				asm volatile (		\
+				asm (			\
 				"roll %1,%0"		\
 				: "=r"(ret)		\
 				: "I"(n), "0"(a)	\
 				: "cc");		\
 			   ret;				\
 			})
-#  elif defined(__powerpc)
+#  elif defined(__powerpc) || defined(__ppc)
 #   define ROTATE(a,n)	({ register unsigned int ret;	\
-				asm volatile (		\
+				asm (			\
 				"rlwinm %0,%1,%2,0,31"	\
 				: "=r"(ret)		\
 				: "r"(a), "I"(n));	\
@@ -215,27 +222,27 @@
  * Engage compiler specific "fetch in reverse byte order"
  * intrinsic function if available.
  */
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM)
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
   /* some GNU C inline assembler templates by */
-#  if defined(__i386) && !defined(I386_ONLY)
+#  if (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
 #   define BE_FETCH32(a)	({ register unsigned int l=(a);\
-				asm volatile (		\
+				asm (			\
 				"bswapl %0"		\
 				: "=r"(l) : "0"(l));	\
 			  l;				\
 			})
 #  elif defined(__powerpc)
 #   define LE_FETCH32(a)	({ register unsigned int l;	\
-				asm volatile (		\
+				asm (			\
 				"lwbrx %0,0,%1"		\
 				: "=r"(l)		\
 				: "r"(a));		\
 			   l;				\
 			})
-#  elif defined(__sparc) && defined(ULTRASPARC)
+#  elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
 #   define LE_FETCH32(a)	({ register unsigned int l;		\
-				asm volatile (			\
+				asm (				\
 				"lda [%1]#ASI_PRIMARY_LITTLE,%0"\
 				: "=r"(l)			\
 				: "r"(a));			\
@@ -403,13 +410,14 @@
  * Time for some action:-)
  */
 
-void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
+int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
 	{
+	const unsigned char *data=data_;
 	register HASH_LONG * p;
 	register unsigned long l;
 	int sw,sc,ew,ec;
 
-	if (len==0) return;
+	if (len==0) return 1;
 
 	l=(c->Nl+(len<<3))&0xffffffffL;
 	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
@@ -458,7 +466,7 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
 				HOST_c2l_p(data,l,ec); p[sw]=l;
 				}
 			}
-		return;
+		return 1;
 		}
 	}
@@ -512,6 +520,7 @@ void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
 		HOST_c2l_p(data,l,ec);
 		*p=l;
 		}
+	return 1;
 	}
 
 
@@ -535,7 +544,7 @@ void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
 	}
 
 
-void HASH_FINAL (unsigned char *md, HASH_CTX *c)
+int HASH_FINAL (unsigned char *md, HASH_CTX *c)
 	{
 	register HASH_LONG *p;
 	register unsigned long l;
@@ -596,4 +605,5 @@ void HASH_FINAL (unsigned char *md, HASH_CTX *c)
 	 * but I'm not worried :-)
 	memset((void *)c,0,sizeof(HASH_CTX));
 	 */
+	return 1;
 	}
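
Note (not part of the patch): md32_common.h is a template header; a digest implementation defines HASH_UPDATE and HASH_FINAL to its own entry points before including it (for MD5 they become MD5_Update and MD5_Final), so after this change those functions return 1 on success rather than void. Below is a minimal caller sketch, assuming the MD5 instantiation of the template, showing how the new return values can be checked; it is illustrative only.

/*
 * Illustrative caller only -- not part of the diff above. Assumes the MD5
 * instantiation of md32_common.h (HASH_UPDATE -> MD5_Update,
 * HASH_FINAL -> MD5_Final) and checks the newly introduced return values.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>

int main(void)
	{
	MD5_CTX ctx;
	unsigned char md[MD5_DIGEST_LENGTH];
	const char *msg = "abc";
	int i;

	if (!MD5_Init(&ctx)
	    || !MD5_Update(&ctx, msg, strlen(msg))	/* returned void before this change */
	    || !MD5_Final(md, &ctx))			/* returned void before this change */
		{
		fprintf(stderr, "digest operation failed\n");
		return 1;
		}

	for (i = 0; i < MD5_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");
	return 0;
	}

Reporting success through an int return appears to be aimed at letting the higher-level digest wrappers propagate failures instead of silently ignoring them, which is why the low-level routines here gain a return value even though they currently always return 1.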