X-Git-Url: https://git.openssl.org/gitweb/?a=blobdiff_plain;f=crypto%2Fmd32_common.h;h=147a7a00c30e3649ad5695cc49b3776a0720d594;hb=76c15d790e07f6cc098be2d7b7f6ddc8acd11ca6;hp=61bcd9786f8679668d706b78c0691a0d3c51f1d6;hpb=f8d6be3f8170c4aa3bea1618994f912629f3d0c3;p=openssl.git diff --git a/crypto/md32_common.h b/crypto/md32_common.h index 61bcd9786f..147a7a00c3 100644 --- a/crypto/md32_common.h +++ b/crypto/md32_common.h @@ -142,8 +142,10 @@ */ #undef ROTATE #ifndef PEDANTIC -# if defined(_MSC_VER) || defined(__ICC) +# if defined(_MSC_VER) # define ROTATE(a,n) _lrotl(a,n) +# elif defined(__ICC) +# define ROTATE(a,n) _rotl(a,n) # elif defined(__MWERKS__) # if defined(__POWERPC__) # define ROTATE(a,n) __rlwinm(a,n,0,31) @@ -165,7 +167,7 @@ asm ( \ "roll %1,%0" \ : "=r"(ret) \ - : "I"(n), "0"(a) \ + : "I"(n), "0"((unsigned int)(a)) \ : "cc"); \ ret; \ }) @@ -213,6 +215,24 @@ asm ("bswapl %0":"=r"(r):"0"(r)); \ *((unsigned int *)(c))=r; (c)+=4; r; }) # endif +# elif defined(__aarch64__) +# if defined(__BYTE_ORDER__) +# if defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__ +# define HOST_c2l(c,l) ({ unsigned int r; \ + asm ("rev %w0,%w1" \ + :"=r"(r) \ + :"r"(*((const unsigned int *)(c))));\ + (c)+=4; (l)=r; }) +# define HOST_l2c(l,c) ({ unsigned int r; \ + asm ("rev %w0,%w1" \ + :"=r"(r) \ + :"r"((unsigned int)(l)));\ + *((unsigned int *)(c))=r; (c)+=4; r; }) +# elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__ +# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, (l)) +# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, (l)) +# endif +# endif # endif # endif #endif @@ -241,11 +261,11 @@ #ifndef PEDANTIC # if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) # if defined(__s390x__) -# define HOST_c2l(c,l) ({ asm ("lrv %0,0(%1)" \ - :"=r"(l) : "r"(c)); \ +# define HOST_c2l(c,l) ({ asm ("lrv %0,%1" \ + :"=d"(l) :"m"(*(const unsigned int *)(c)));\ (c)+=4; (l); }) -# define HOST_l2c(l,c) ({ asm ("strv %0,0(%1)" \ - : : "r"(l),"r"(c) : "memory"); \ +# define HOST_l2c(l,c) ({ asm ("strv %1,%0" \ + :"=m"(*(unsigned int *)(c)) :"d"(l));\ (c)+=4; (l); }) # endif # endif @@ -293,7 +313,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len) * Wei Dai for pointing it out. */ if (l < c->Nl) /* overflow */ c->Nh++; - c->Nh+=(len>>29); /* might cause compiler warning on 16-bit */ + c->Nh+=(HASH_LONG)(len>>29); /* might cause compiler warning on 16-bit */ c->Nl=l; n = c->num; @@ -331,7 +351,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len) if (len != 0) { p = (unsigned char *)c->data; - c->num = len; + c->num = (unsigned int)len; memcpy (p,data,len); } return 1; @@ -383,6 +403,7 @@ int HASH_FINAL (unsigned char *md, HASH_CTX *c) } #ifndef MD32_REG_T +#if defined(__alpha) || defined(__sparcv9) || defined(__mips) #define MD32_REG_T long /* * This comment was originaly written for MD5, which is why it @@ -400,9 +421,15 @@ int HASH_FINAL (unsigned char *md, HASH_CTX *c) * Well, to be honest it should say that this *prevents* * performance degradation. * - * Apparently there're LP64 compilers that generate better - * code if A-D are declared int. Most notably GCC-x86_64 - * generates better code. + */ +#else +/* + * Above is not absolute and there are LP64 compilers that + * generate better code if MD32_REG_T is defined int. The above + * pre-processor condition reflects the circumstances under which + * the conclusion was made and is subject to further extension. 
  */
+#define MD32_REG_T int
+#endif
 #endif
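
For reference, here is what the HOST_c2l/HOST_l2c/ROTATE fast paths in the hunks above have to compute. The bswapl and rev variants byte-swap on little-endian hosts, i.e. they read and write 32-bit words in a fixed big-endian data order (the order the SHA family uses); the s390x lrv/strv forms appear to be the mirror case of a big-endian host handling little-endian data. The stand-alone sketch below is illustrative only -- ref_c2l, ref_l2c and ref_rotate are invented names, not anything defined by md32_common.h -- and it shows the big-endian data order case in portable C:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

/* Reference 32-bit left rotate: what ROTATE(a,n) must compute,
 * whether it ends up as _lrotl, _rotl, "roll" or plain C. */
static uint32_t ref_rotate(uint32_t a, unsigned n)
{
    return (a << n) | (a >> (32 - n));   /* assumes 0 < n < 32 */
}

/* Reference HOST_c2l for big-endian data order: pull the next four
 * data bytes (most significant first) into a host integer and
 * advance the cursor, regardless of host endianness. */
static uint32_t ref_c2l(const unsigned char **c)
{
    const unsigned char *p = *c;
    uint32_t l = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                 ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    *c += 4;
    return l;
}

/* Reference HOST_l2c: store a host integer back as four big-endian
 * bytes and advance the cursor. */
static void ref_l2c(uint32_t l, unsigned char **c)
{
    unsigned char *p = *c;
    p[0] = (unsigned char)(l >> 24);
    p[1] = (unsigned char)(l >> 16);
    p[2] = (unsigned char)(l >> 8);
    p[3] = (unsigned char)(l);
    *c += 4;
}

int main(void)
{
    const unsigned char in[4] = { 0x01, 0x23, 0x45, 0x67 };
    unsigned char out[4];
    const unsigned char *ip = in;
    unsigned char *op = out;

    uint32_t l = ref_c2l(&ip);           /* 0x01234567 on any host */
    assert(l == 0x01234567);
    assert(ref_rotate(l, 8) == 0x23456701);

    ref_l2c(l, &op);                     /* round-trips the bytes */
    assert(memcmp(in, out, 4) == 0);
    printf("ok: %08x\n", (unsigned)l);
    return 0;
}

Any of the intrinsic or inline-asm variants in the patch can be checked against these reference functions: they must yield the same word values and emit the same byte stream.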
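
The HASH_UPDATE hunk that adds the (HASH_LONG)(len>>29) cast touches the split 64-bit message-length counter kept in c->Nl and c->Nh. A minimal sketch of that bookkeeping, with invented names (bitcount and add_bytes are not part of the header) and assuming 32-bit counter words:

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Split 64-bit bit counter, mirroring the Nl (low) / Nh (high) fields. */
struct bitcount { uint32_t lo, hi; };

/* Account for len more message bytes, i.e. len*8 bits.  len>>29 is the
 * part of len*8 that does not fit in the low 32-bit word, and the
 * compare against the old low word picks up the carry.  The explicit
 * casts are what the patch hunk is about: the narrowing is made
 * intentional where size_t is wider than the counter word. */
static void add_bytes(struct bitcount *b, size_t len)
{
    uint32_t lo = (uint32_t)(b->lo + ((uint32_t)len << 3));
    if (lo < b->lo)                 /* overflow of the low word */
        b->hi++;
    b->hi += (uint32_t)(len >> 29); /* high bits of len*8 */
    b->lo = lo;
}

int main(void)
{
    struct bitcount b = { 0, 0 };
    add_bytes(&b, 0x20000000u);     /* 2^29 bytes = 2^32 bits */
    assert(b.lo == 0 && b.hi == 1);
    add_bytes(&b, 1);               /* 8 more bits */
    assert(b.lo == 8 && b.hi == 1);
    return 0;
}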